Last active
July 2, 2018 22:35
-
-
Save pcn/c0fa9fb5fba1b1d719886347f4c9b3f0 to your computer and use it in GitHub Desktop.
Salt 2018.3.2 breaks the cache runner, maybe other things too?
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Put this runner in your runners dir and you'll have the documented behavior back. | |
# | |
# -*- coding: utf-8 -*- | |
''' | |
Return cached data from minions | |
''' | |
from __future__ import absolute_import, print_function, unicode_literals | |
# Import python libs | |
import fnmatch | |
import logging | |
import os | |
# Import salt libs | |
import salt.config | |
from salt.ext import six | |
import salt.log | |
import salt.utils.args | |
import salt.utils.gitfs | |
import myutils.master | |
import salt.payload | |
import salt.cache | |
import salt.fileserver.gitfs | |
import salt.pillar.git_pillar | |
import salt.runners.winrepo | |
from salt.exceptions import SaltInvocationError | |
from salt.fileserver import clear_lock as _clear_lock | |
log = logging.getLogger(__name__) | |
# Expose ``list_`` to the CLI as ``cache.list`` without shadowing the
# ``list`` builtin inside this module.
__func_alias__ = {
    'list_': 'list',
}
def mograins(tgt=None, tgt_type='glob', **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Return cached grains of the targeted minions, falling back to querying
    the minions live when no cached grains are available
    (``grains_fallback=True``). This is what distinguishes it from
    :py:func:`grains`, which only ever reads the cache.

    CLI Example:

    .. code-block:: bash

        salt-run cache.mograins
    '''
    # **kwargs is accepted (and ignored) so stray CLI kwargs don't error.
    pillar_util = myutils.master.MasterPillarUtil(tgt, tgt_type,
                                                  use_cached_grains=True,
                                                  grains_fallback=True,
                                                  opts=__opts__)
    cached_grains = pillar_util.get_minion_grains()
    return cached_grains
def grains(tgt=None, tgt_type='glob', **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Return cached grains of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.grains
    '''
    # Cache-only lookup: no live fallback to the minions.
    util = myutils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=True,
        grains_fallback=False,
        opts=__opts__)
    return util.get_minion_grains()
def pillar(tgt=None, tgt_type='glob', **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Return cached pillars of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.pillar
    '''
    # Cache-only lookup for both grains and pillar: no fallbacks.
    util = myutils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=True,
        grains_fallback=False,
        use_cached_pillar=True,
        pillar_fallback=False,
        opts=__opts__)
    return util.get_minion_pillar()
def mine(tgt=None, tgt_type='glob', **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Return cached mine data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.mine
    '''
    # Mine data lives in its own cache file, so grain/pillar caching
    # is disabled entirely for this lookup.
    util = myutils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=False,
        grains_fallback=False,
        use_cached_pillar=False,
        pillar_fallback=False,
        opts=__opts__)
    return util.get_cached_mine_data()
def _clear_cache(tgt=None,
                 tgt_type='glob',
                 clear_pillar_flag=False,
                 clear_grains_flag=False,
                 clear_mine_flag=False,
                 clear_mine_func_flag=None):
    '''
    Clear the cached data/files for the targeted minions.

    Shared implementation behind the public ``clear_*`` runner functions.
    '''
    # Refuse to clear anything without an explicit target.
    if tgt is None:
        return False
    util = myutils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=True,
        grains_fallback=False,
        use_cached_pillar=True,
        pillar_fallback=False,
        opts=__opts__)
    return util.clear_cached_minion_data(
        clear_pillar=clear_pillar_flag,
        clear_grains=clear_grains_flag,
        clear_mine=clear_mine_flag,
        clear_mine_func=clear_mine_func_flag)
def clear_pillar(tgt=None, tgt_type='glob'):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Clear the cached pillar data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_pillar
    '''
    return _clear_cache(tgt, tgt_type, clear_pillar_flag=True)
def clear_grains(tgt=None, tgt_type='glob'):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Clear the cached grains data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_grains
    '''
    return _clear_cache(tgt, tgt_type, clear_grains_flag=True)
def clear_mine(tgt=None, tgt_type='glob'):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Clear the cached mine data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_mine
    '''
    return _clear_cache(tgt, tgt_type, clear_mine_flag=True)
def clear_mine_func(tgt=None,
                    tgt_type='glob',
                    clear_mine_func_flag=None):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Clear the cached mine function data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_mine_func tgt='*' clear_mine_func_flag='network.interfaces'
    '''
    return _clear_cache(tgt, tgt_type,
                        clear_mine_func_flag=clear_mine_func_flag)
def clear_all(tgt=None, tgt_type='glob'):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Clear the cached pillar, grains, and mine data of the targeted minions

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_all
    '''
    return _clear_cache(
        tgt,
        tgt_type,
        clear_pillar_flag=True,
        clear_grains_flag=True,
        clear_mine_flag=True)
def clear_git_lock(role, remote=None, **kwargs):
    '''
    .. versionadded:: 2015.8.2

    Remove the update locks for Salt components (gitfs, git_pillar, winrepo)
    which use gitfs backend code from salt.utils.gitfs.

    .. note::
        Running :py:func:`cache.clear_all <salt.runners.cache.clear_all>` will
        not include this function as it does for pillar, grains, and mine.
        Additionally, executing this function with a ``role`` of ``gitfs`` is
        equivalent to running ``salt-run fileserver.clear_lock backend=git``.

    role
        Which type of lock to remove (``gitfs``, ``git_pillar``, or
        ``winrepo``)

    remote
        If specified, then any remotes which contain the passed string will
        have their lock cleared. For example, a ``remote`` value of **github**
        will remove the lock from all github.com remotes.

    type : update,checkout,mountpoint
        The types of lock to clear. Can be one or more of ``update``,
        ``checkout``, and ``mountpoint``, and can be passed either as a
        comma-separated or Python list.

        .. versionadded:: 2015.8.8
        .. versionchanged:: 2018.3.0
            ``mountpoint`` lock type added

    CLI Examples:

    .. code-block:: bash

        salt-run cache.clear_git_lock gitfs
        salt-run cache.clear_git_lock git_pillar
        salt-run cache.clear_git_lock git_pillar type=update
        salt-run cache.clear_git_lock git_pillar type=update,checkout
        salt-run cache.clear_git_lock git_pillar type='["update", "mountpoint"]'
    '''
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    # 'type' may be a comma-separated string or a list; default clears all
    # three lock types.
    type_ = salt.utils.args.split_input(
        kwargs.pop('type', ['update', 'checkout', 'mountpoint']))
    # Anything left in kwargs is unsupported -- raise.
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)
    # Build the list of gitfs-backed objects whose locks will be cleared,
    # depending on which subsystem (role) was requested.
    if role == 'gitfs':
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY
            )
        ]
    elif role == 'git_pillar':
        git_objects = []
        for ext_pillar in __opts__['ext_pillar']:
            key = next(iter(ext_pillar))
            if key == 'git':
                # Legacy (non-list) git ext_pillar configs are skipped.
                if not isinstance(ext_pillar['git'], list):
                    continue
                obj = salt.utils.gitfs.GitPillar(
                    __opts__,
                    ext_pillar['git'],
                    per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
                    global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
                git_objects.append(obj)
    elif role == 'winrepo':
        winrepo_dir = __opts__['winrepo_dir']
        winrepo_remotes = __opts__['winrepo_remotes']
        git_objects = []
        # Two winrepo configs exist: the legacy one and the "ng" one.
        for remotes, base_dir in (
                (winrepo_remotes, winrepo_dir),
                (__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
        ):
            obj = salt.utils.gitfs.WinRepo(
                __opts__,
                remotes,
                per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
                global_only=salt.runners.winrepo.GLOBAL_ONLY,
                cache_root=base_dir)
            git_objects.append(obj)
    else:
        raise SaltInvocationError('Invalid role \'{0}\''.format(role))
    # Clear each requested lock type on each object, accumulating results.
    ret = {}
    for obj in git_objects:
        for lock_type in type_:
            cleared, errors = _clear_lock(obj.clear_lock,
                                          role,
                                          remote=remote,
                                          lock_type=lock_type)
            if cleared:
                ret.setdefault('cleared', []).extend(cleared)
            if errors:
                ret.setdefault('errors', []).extend(errors)
    if not ret:
        return 'No locks were removed'
    return ret
def cloud(tgt, provider=None):
    '''
    Return cloud cache data for target.

    .. note:: Only works with glob matching

    tgt
      Glob Target to match minion ids

    provider
      Cloud Provider

    CLI Example:

    .. code-block:: bash

        salt-run cache.cloud 'salt*'
        salt-run cache.cloud glance.example.org provider=openstack
    '''
    # Only glob (string) targets are supported.
    if not isinstance(tgt, six.string_types):
        return {}
    opts = salt.config.cloud_config(
        os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
    )
    # Without update_cachedir there is no cloud cache to read.
    if not opts.get('update_cachedir'):
        return {}
    cloud_cache = __utils__['cloud.list_cache_nodes_full'](opts=opts, provider=provider)
    if cloud_cache is None:
        return {}
    matches = {}
    # Cache layout: {driver: {provider: {node_name: node_data}}}
    for _, provider_map in six.iteritems(cloud_cache):
        for prov_name, nodes in six.iteritems(provider_map):
            for node_name, node_data in six.iteritems(nodes):
                if fnmatch.fnmatch(node_name, tgt):
                    matches[node_name] = node_data
                    matches[node_name]['provider'] = prov_name
    return matches
def store(bank, key, data, cachedir=None):
    '''
    Store ``data`` in the specified bank, under ``key``.

    (The previous docstring was copy-pasted from ``list_`` and wrongly
    described this function as listing entries.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.store mycache mykey 'The time has come the walrus said'
    '''
    if cachedir is None:
        cachedir = __opts__['cachedir']
    try:
        cache = salt.cache.Cache(__opts__, cachedir=cachedir)
    except TypeError:
        # Older salt.cache.Cache signatures do not accept cachedir.
        cache = salt.cache.Cache(__opts__)
    return cache.store(bank, key, data)
def list_(bank, cachedir=None):
    '''
    Lists entries stored in the specified bank.

    CLI Example:

    .. code-block:: bash

        salt-run cache.list cloud/active/ec2/myec2 cachedir=/var/cache/salt/
    '''
    effective_cachedir = __opts__['cachedir'] if cachedir is None else cachedir
    try:
        # Newer salt.cache.Cache accepts an explicit cachedir.
        cache = salt.cache.Cache(__opts__, cachedir=effective_cachedir)
    except TypeError:
        # Older releases do not; fall back to the default location.
        cache = salt.cache.Cache(__opts__)
    return cache.list(bank)
def fetch(bank, key, cachedir=None):
    '''
    Fetch data from a salt.cache bank.

    CLI Example:

    .. code-block:: bash

        salt-run cache.fetch cloud/active/ec2/myec2 myminion cachedir=/var/cache/salt/
    '''
    effective_cachedir = __opts__['cachedir'] if cachedir is None else cachedir
    try:
        # Newer salt.cache.Cache accepts an explicit cachedir.
        cache = salt.cache.Cache(__opts__, cachedir=effective_cachedir)
    except TypeError:
        # Older releases do not; fall back to the default location.
        cache = salt.cache.Cache(__opts__)
    return cache.fetch(bank, key)
def flush(bank, key=None, cachedir=None):
    '''
    Remove the key from the cache bank with all the key content. If no key is
    specified remove the entire bank with all keys and sub-banks inside.

    CLI Examples:

    .. code-block:: bash

        salt-run cache.flush cloud/active/ec2/myec2 cachedir=/var/cache/salt/
        salt-run cache.flush cloud/active/ec2/myec2 myminion cachedir=/var/cache/salt/
    '''
    effective_cachedir = __opts__['cachedir'] if cachedir is None else cachedir
    try:
        # Newer salt.cache.Cache accepts an explicit cachedir.
        cache = salt.cache.Cache(__opts__, cachedir=effective_cachedir)
    except TypeError:
        # Older releases do not; fall back to the default location.
        cache = salt.cache.Cache(__opts__)
    return cache.flush(bank, key)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Myutils needs to be a package dir, so create an __init__.py | |
# Put this in your runners dir, under a directory called myutils, and | |
# call it "myutils/__init__.py" |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Put this in your runners directory, in a directory called "myutils", | |
# and call it "myutils/master.py" | |
# -*- coding: utf-8 -*- | |
''' | |
salt.utils.master | |
----------------- | |
Utilities that can only be used on a salt master. | |
''' | |
# Import python libs | |
from __future__ import absolute_import, unicode_literals | |
import os | |
import logging | |
import signal | |
from threading import Thread, Event | |
# Import salt libs | |
import salt.log | |
import salt.cache | |
import salt.client | |
import salt.pillar | |
import salt.utils.atomicfile | |
import salt.utils.files | |
import myutils.minions | |
import salt.utils.platform | |
import salt.utils.stringutils | |
import salt.utils.verify | |
import salt.payload | |
from salt.exceptions import SaltException | |
import salt.config | |
from salt.utils.cache import CacheCli as cache_cli | |
from salt.utils.process import MultiprocessingProcess | |
# Import third party libs | |
from salt.ext import six | |
from salt.utils.zeromq import zmq | |
log = logging.getLogger(__name__) | |
class MasterPillarUtil(object):
    '''
    Helper utility for easy access to targeted minion grain and
    pillar data, either from cached data on the master or retrieved
    on demand, or (by default) both.

    The minion pillar data returned in get_minion_pillar() is
    compiled directly from salt.pillar.Pillar on the master to
    avoid any possible 'pillar poisoning' from a compromised or
    untrusted minion.

    ** However, the minion grains are still possibly entirely
    supplied by the minion. **

    Example use case:
        For runner modules that need access minion pillar data,
        MasterPillarUtil.get_minion_pillar should be used instead
        of getting the pillar data by executing the "pillar" module
        on the minions:

        # my_runner.py
        tgt = 'web*'
        pillar_util = salt.utils.master.MasterPillarUtil(tgt, tgt_type='glob', opts=__opts__)
        pillar_data = pillar_util.get_minion_pillar()
    '''
    def __init__(self,
                 tgt='',
                 tgt_type='glob',
                 saltenv=None,
                 use_cached_grains=True,
                 use_cached_pillar=True,
                 grains_fallback=True,
                 pillar_fallback=True,
                 opts=None):
        # opts (the master config) is mandatory -- everything else here
        # depends on it.
        log.debug('New instance of %s created.',
                  self.__class__.__name__)
        if opts is None:
            log.error('%s: Missing master opts init arg.',
                      self.__class__.__name__)
            raise SaltException('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
        else:
            self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.saltenv = saltenv
        # The four flags below control the cache-vs-live lookup strategy
        # in _get_minion_grains() / _get_minion_pillar().
        self.use_cached_grains = use_cached_grains
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        self.cache = salt.cache.factory(opts)
        log.debug(
            'Init settings: tgt: \'%s\', tgt_type: \'%s\', saltenv: \'%s\', '
            'use_cached_grains: %s, use_cached_pillar: %s, '
            'grains_fallback: %s, pillar_fallback: %s',
            tgt, tgt_type, saltenv, use_cached_grains, use_cached_pillar,
            grains_fallback, pillar_fallback
        )
    def _get_cached_mine_data(self, *minion_ids):
        '''
        Return one dict mapping each targeted minion id to its cached mine
        data ({} when nothing is cached for that minion). With no
        minion_ids given, every minion in the cache is read.
        '''
        # Return one dict with the cached mine data of the targeted minions
        mine_data = dict([(minion_id, {}) for minion_id in minion_ids])
        if (not self.opts.get('minion_data_cache', False)
                and not self.opts.get('enforce_mine_cache', False)):
            # NOTE(review): this message is missing a space between the two
            # string literals and misspells 'enforce_mine_cache'.
            log.debug('Skipping cached mine data minion_data_cache'
                      'and enfore_mine_cache are both disabled.')
            return mine_data
        if not minion_ids:
            minion_ids = self.cache.list('minions')
        for minion_id in minion_ids:
            # Reject ids that could escape the cache directory.
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue
            mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'mine')
            if isinstance(mdata, dict):
                mine_data[minion_id] = mdata
        return mine_data
    def _get_cached_minion_data(self, *minion_ids):
        '''
        Return (grains, pillars): two dicts of cached data keyed by minion
        id, each entry defaulting to {} when no cache exists.
        '''
        # Return two separate dicts of cached grains and pillar data of the
        # minions
        grains = dict([(minion_id, {}) for minion_id in minion_ids])
        pillars = grains.copy()
        if not self.opts.get('minion_data_cache', False):
            log.debug('Skipping cached data because minion_data_cache is not '
                      'enabled.')
            return grains, pillars
        if not minion_ids:
            minion_ids = self.cache.list('minions')
        for minion_id in minion_ids:
            # Reject ids that could escape the cache directory.
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue
            # Grains and pillar share one cache entry ('data').
            mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'data')
            if not isinstance(mdata, dict):
                log.warning(
                    'cache.fetch should always return a dict. ReturnedType: %s, MinionId: %s',
                    type(mdata).__name__,
                    minion_id
                )
                continue
            if 'grains' in mdata:
                grains[minion_id] = mdata['grains']
            if 'pillar' in mdata:
                pillars[minion_id] = mdata['pillar']
        return grains, pillars
    def _get_live_minion_grains(self, minion_ids):
        '''
        Run grains.items on the given minions and return the results dict.
        Minions that do not respond within the configured timeout are
        simply absent from the result.
        '''
        # Returns a dict of grains fetched directly from the minions
        log.debug('Getting live grains for minions: "%s"', minion_ids)
        client = salt.client.get_local_client(self.opts['conf_file'])
        ret = client.cmd(
            ','.join(minion_ids),
            'grains.items',
            timeout=self.opts['timeout'],
            tgt_type='list')
        return ret
    def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
        '''
        Compile pillar for one minion on the master, using the supplied
        grains. Returns {} when no minion id or no grains are given.
        '''
        # Returns a dict of pillar data for one minion
        if minion_id is None:
            return {}
        if not minion_grains:
            log.warning(
                'Cannot get pillar data for %s: no grains supplied.',
                minion_id
            )
            return {}
        log.debug('Getting live pillar for %s', minion_id)
        pillar = salt.pillar.Pillar(
            self.opts,
            minion_grains,
            minion_id,
            self.saltenv,
            self.opts['ext_pillar'])
        log.debug('Compiling pillar for %s', minion_id)
        ret = pillar.compile_pillar()
        return ret
    def _get_minion_grains(self, *minion_ids, **kwargs):
        '''
        Merge cached and live grains for the targeted minions according to
        self.use_cached_grains / self.grains_fallback. Every targeted
        minion is present in the result, defaulting to {}.
        '''
        # Get the minion grains either from cache or from a direct query
        # on the minion. By default try to use cached grains first, then
        # fall back to querying the minion directly.
        ret = {}
        cached_grains = kwargs.get('cached_grains', {})
        cret = {}
        lret = {}
        if self.use_cached_grains:
            # Keep only non-empty cached entries; fetch the rest live
            # when fallback is enabled.
            cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
            missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
            log.debug('Missed cached minion grains for: %s', missed_minions)
            if self.grains_fallback:
                lret = self._get_live_minion_grains(missed_minions)
            # Later dicts win: cached entries override live ones here.
            ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
        else:
            # Live-first; cached data only fills the gaps when fallback
            # is enabled (cached entries override in the final merge).
            lret = self._get_live_minion_grains(minion_ids)
            missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
            log.debug('Missed live minion grains for: %s', missed_minions)
            if self.grains_fallback:
                cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
            ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
        return ret
    def _get_minion_pillar(self, *minion_ids, **kwargs):
        '''
        Merge cached and freshly-compiled pillar for the targeted minions
        according to self.use_cached_pillar / self.pillar_fallback. Every
        targeted minion is present in the result, defaulting to {}.
        '''
        # Get the minion pillar either from cache or from a direct query
        # on the minion. By default try use the cached pillar first, then
        # fall back to rendering pillar on demand with the supplied grains.
        ret = {}
        grains = kwargs.get('grains', {})
        cached_pillar = kwargs.get('cached_pillar', {})
        cret = {}
        lret = {}
        if self.use_cached_pillar:
            # Keep only non-empty cached entries; compile the rest live
            # when fallback is enabled.
            cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
            missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
            log.debug('Missed cached minion pillars for: %s', missed_minions)
            if self.pillar_fallback:
                lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in missed_minions])
            # Later dicts win: cached entries override live ones here.
            ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
        else:
            # Live-first; cached data only fills the gaps when fallback
            # is enabled (cached entries override in the final merge).
            lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in minion_ids])
            missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
            log.debug('Missed live minion pillars for: %s', missed_minions)
            if self.pillar_fallback:
                cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
            ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
        return ret
    def _tgt_to_list(self):
        '''
        Resolve self.tgt / self.tgt_type to a list of matching minion ids.
        '''
        # Return a list of minion ids that match the target and tgt_type
        minion_ids = []
        ckminions = myutils.minions.CkMinions(self.opts)
        _res = ckminions.check_minions(self.tgt, self.tgt_type)
        minion_ids = _res['minions']
        if len(minion_ids) == 0:
            log.debug('No minions matched for tgt="%s" and tgt_type="%s"', self.tgt, self.tgt_type)
            # NOTE(review): returns {} here but a list on the happy path;
            # callers only iterate or truth-test it, so this works, but an
            # empty list would be the consistent value.
            return {}
        log.debug('Matching minions for tgt="%s" and tgt_type="%s": %s', self.tgt, self.tgt_type, minion_ids)
        return minion_ids
    def get_minion_pillar(self):
        '''
        Get pillar data for the targeted minions, either by fetching the
        cached minion data on the master, or by compiling the minion's
        pillar data on the master.

        For runner modules that need access minion pillar data, this
        function should be used instead of getting the pillar data by
        executing the pillar module on the minions.

        By default, this function tries hard to get the pillar data:
            - Try to get the cached minion grains and pillar if the
              master has minion_data_cache: True
            - If the pillar data for the minion is cached, use it.
            - If there is no cached grains/pillar data for a minion,
              then try to get the minion grains directly from the minion.
            - Use the minion grains to compile the pillar directly from the
              master using salt.pillar.Pillar
        '''
        minion_pillars = {}
        minion_grains = {}
        minion_ids = self._tgt_to_list()
        # Only read the cache when at least one flag can make use of it.
        if any(arg for arg in [self.use_cached_grains, self.use_cached_pillar, self.grains_fallback, self.pillar_fallback]):
            log.debug('Getting cached minion data')
            cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
        else:
            cached_minion_grains = {}
            cached_minion_pillars = {}
        log.debug('Getting minion grain data for: %s', minion_ids)
        minion_grains = self._get_minion_grains(
            *minion_ids,
            cached_grains=cached_minion_grains)
        log.debug('Getting minion pillar data for: %s', minion_ids)
        minion_pillars = self._get_minion_pillar(
            *minion_ids,
            grains=minion_grains,
            cached_pillar=cached_minion_pillars)
        return minion_pillars
    def get_minion_grains(self):
        '''
        Get grains data for the targeted minions, either by fetching the
        cached minion data on the master, or by fetching the grains
        directly on the minion.

        By default, this function tries hard to get the grains data:
            - Try to get the cached minion grains if the master
              has minion_data_cache: True
            - If the grains data for the minion is cached, use it.
            - If there is no cached grains data for a minion,
              then try to get the minion grains directly from the minion.
        '''
        minion_grains = {}
        minion_ids = self._tgt_to_list()
        if not minion_ids:
            return {}
        # Only read the cache when at least one flag can make use of it.
        if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
            log.debug('Getting cached minion data.')
            cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
        else:
            cached_minion_grains = {}
        log.debug('Getting minion grain data for: %s', minion_ids)
        minion_grains = self._get_minion_grains(
            *minion_ids,
            cached_grains=cached_minion_grains)
        return minion_grains
    def get_cached_mine_data(self):
        '''
        Get cached mine data for the targeted minions.
        '''
        mine_data = {}
        minion_ids = self._tgt_to_list()
        log.debug('Getting cached mine data for: %s', minion_ids)
        mine_data = self._get_cached_mine_data(*minion_ids)
        return mine_data
    def clear_cached_minion_data(self,
                                 clear_pillar=False,
                                 clear_grains=False,
                                 clear_mine=False,
                                 clear_mine_func=None):
        '''
        Clear the cached data/files for the targeted minions.

        Returns False when nothing was requested, True otherwise (even
        when file operations fail -- see the except clause below).
        '''
        clear_what = []
        if clear_pillar:
            clear_what.append('pillar')
        if clear_grains:
            clear_what.append('grains')
        if clear_mine:
            clear_what.append('mine')
        if clear_mine_func is not None:
            clear_what.append('mine_func: \'{0}\''.format(clear_mine_func))
        if not len(clear_what):
            log.debug('No cached data types specified for clearing.')
            return False
        minion_ids = self._tgt_to_list()
        log.debug('Clearing cached %s data for: %s',
                  ', '.join(clear_what),
                  minion_ids)
        if clear_pillar == clear_grains:
            # clear_pillar and clear_grains are both True or both False.
            # This means we don't deal with pillar/grains caches at all.
            grains = {}
            pillars = {}
        else:
            # Unless both clear_pillar and clear_grains are True, we need
            # to read in the pillar/grains data since they are both stored
            # in the same file, 'data.p'
            grains, pillars = self._get_cached_minion_data(*minion_ids)
        try:
            c_minions = self.cache.list('minions')
            for minion_id in minion_ids:
                # Reject ids that could escape the cache directory.
                if not salt.utils.verify.valid_id(self.opts, minion_id):
                    continue
                if minion_id not in c_minions:
                    # Cache bank for this minion does not exist. Nothing to do.
                    continue
                bank = 'minions/{0}'.format(minion_id)
                minion_pillar = pillars.pop(minion_id, False)
                minion_grains = grains.pop(minion_id, False)
                if ((clear_pillar and clear_grains) or
                        (clear_pillar and not minion_grains) or
                        (clear_grains and not minion_pillar)):
                    # Not saving pillar or grains, so just delete the cache file
                    self.cache.flush(bank, 'data')
                elif clear_pillar and minion_grains:
                    # Keep grains, drop pillar: rewrite 'data' with grains only.
                    self.cache.store(bank, 'data', {'grains': minion_grains})
                elif clear_grains and minion_pillar:
                    # Keep pillar, drop grains: rewrite 'data' with pillar only.
                    self.cache.store(bank, 'data', {'pillar': minion_pillar})
                if clear_mine:
                    # Delete the whole mine file
                    self.cache.flush(bank, 'mine')
                elif clear_mine_func is not None:
                    # Delete a specific function from the mine file
                    mine_data = self.cache.fetch(bank, 'mine')
                    if isinstance(mine_data, dict):
                        if mine_data.pop(clear_mine_func, False):
                            self.cache.store(bank, 'mine', mine_data)
        except (OSError, IOError):
            # NOTE(review): file errors are silently treated as success;
            # presumably intentional best-effort behavior -- confirm.
            return True
        return True
class CacheTimer(Thread):
    '''
    A basic timer class the fires timer-events every second.
    This is used for cleanup by the ConnectedCache()
    '''

    def __init__(self, opts, event):
        Thread.__init__(self)
        self.opts = opts
        self.stopped = event
        self.daemon = True
        self.serial = salt.payload.Serial(opts.get('serial', ''))
        self.timer_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')

    def run(self):
        '''
        main loop that fires the event every second
        '''
        ctx = zmq.Context()
        # PUB socket broadcasting one serialized tick per second.
        publisher = ctx.socket(zmq.PUB)
        publisher.setsockopt(zmq.LINGER, 100)
        publisher.bind('ipc://' + self.timer_sock)
        tick = 0
        log.debug('ConCache-Timer started')
        # Event.wait(1) is both the one-second delay and the stop signal.
        while not self.stopped.wait(1):
            publisher.send(self.serial.dumps(tick))
            # Counter wraps back to 0 after 59.
            tick = (tick + 1) % 60
class CacheWorker(MultiprocessingProcess):
    '''
    Worker for ConnectedCache which runs in its
    own process to prevent blocking of ConnectedCache
    main-loop when refreshing minion-list
    '''

    def __init__(self, opts, **kwargs):
        '''
        Sets up the zmq-connection to the ConCache
        '''
        super(CacheWorker, self).__init__(**kwargs)
        self.opts = opts

    # __setstate__/__getstate__ are Windows-only plumbing: they let
    # __init__ re-run in the child process (register_after_fork
    # equivalent) when the process object is pickled across spawn.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )

    def __getstate__(self):
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }

    def run(self):
        '''
        Gather currently connected minions and update the cache
        '''
        connected = list(myutils.minions.CkMinions(self.opts).connected_ids())
        cache_client = cache_cli(self.opts)
        cache_client.get_cached()
        cache_client.put_cache([connected])
        log.debug('ConCache CacheWorker update finished')
class ConnectedCache(MultiprocessingProcess):
    '''
    Provides access to all minions ids that the master has
    successfully authenticated. The cache is cleaned up regularly by
    comparing it to the IPs that have open connections to
    the master publisher port.
    '''

    def __init__(self, opts, **kwargs):
        '''
        starts the timer and inits the cache itself

        :param opts: master configuration dict (needs ``sock_dir``,
            ``serial`` and ``publish_port``-related settings)
        '''
        super(ConnectedCache, self).__init__(**kwargs)
        log.debug('ConCache initializing...')

        # the possible settings for the cache
        self.opts = opts

        # the actual cached minion ids
        self.minions = []

        # IPC socket paths used to talk to this process
        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
        self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
        self.cleanup()

        # the timer provides 1-second intervals to the loop in run()
        # to make the cache system most responsive, we do not use a loop-
        # delay which makes it hard to get 1-second intervals without a timer
        self.timer_stop = Event()
        self.timer = CacheTimer(self.opts, self.timer_stop)
        self.timer.start()
        self.running = True

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )

    def __getstate__(self):
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }

    def signal_handler(self, sig, frame):
        '''
        handle signals and shutdown
        '''
        self.stop()

    def cleanup(self):
        '''
        remove sockets on shutdown
        '''
        log.debug('ConCache cleaning up')
        if os.path.exists(self.cache_sock):
            os.remove(self.cache_sock)
        if os.path.exists(self.update_sock):
            os.remove(self.update_sock)
        if os.path.exists(self.upd_t_sock):
            os.remove(self.upd_t_sock)

    def secure(self):
        '''
        secure the sockets for root-only access
        '''
        log.debug('ConCache securing sockets')
        if os.path.exists(self.cache_sock):
            os.chmod(self.cache_sock, 0o600)
        if os.path.exists(self.update_sock):
            os.chmod(self.update_sock, 0o600)
        if os.path.exists(self.upd_t_sock):
            os.chmod(self.upd_t_sock, 0o600)

    def stop(self):
        '''
        shutdown cache process
        '''
        # avoid getting called twice
        self.cleanup()
        if self.running:
            self.running = False
            self.timer_stop.set()
            self.timer.join()

    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()

        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, b'')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                # Bug fix: 'socks' is unbound when poll() raises; skip the
                # rest of this iteration instead of hitting a NameError.
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                # same unbound-'socks' protection as above
                continue

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)

                # requests to the minion list are send as str's
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list only has 1 item, its from an MWorker, we append it
                # 2. if the list contains another list, its from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed
                try:
                    if len(new_c_data) == 0:
                        log.debug('ConCache Got empty update from worker')
                        continue

                    data = new_c_data[0]

                    if isinstance(data, six.string_types):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)

                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data

                except IndexError:
                    log.debug('ConCache Got malformed result dict from worker')
                    del new_c_data

                log.info('ConCache %s entries in cache', len(self.minions))

            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
def ping_all_connected_minions(opts):
    '''
    Send a ``test.ping`` to every minion believed to be connected.

    When the minion data cache is enabled the explicit list of connected
    minion ids is targeted; otherwise fall back to a '*' glob.
    '''
    client = salt.client.LocalClient()
    if opts['minion_data_cache']:
        target = list(myutils.minions.CkMinions(opts).connected_ids())
        target_type = 'list'
    else:
        target = '*'
        target_type = 'glob'
    client.cmd(target, 'test.ping', tgt_type=target_type)
def get_master_key(key_user, opts, skip_perm_errors=False):
    '''
    Read the master's rotating key for *key_user* from the cachedir.

    Returns the key contents, or an empty string when the keyfile cannot
    be read (callers then fall back to external auth).
    '''
    # 'root' and 'sudo_*' users read the key of the user the master
    # actually runs as.
    if key_user == 'root' and opts.get('user', 'root') != 'root':
        key_user = opts.get('user', 'root')
    if key_user.startswith('sudo_'):
        key_user = opts.get('user', 'root')
    if salt.utils.platform.is_windows():
        # The username may contain '\' if it is in Windows
        # 'DOMAIN\username' format. Fix this for the keyfile path.
        key_user = key_user.replace('\\', '_')
    keyfile = os.path.join(opts['cachedir'], '.{0}_key'.format(key_user))
    # Make sure all key parent directories are accessible
    salt.utils.verify.check_path_traversal(opts['cachedir'], key_user,
                                           skip_perm_errors)
    try:
        with salt.utils.files.fopen(keyfile, 'r') as key:
            return key.read()
    except (OSError, IOError):
        # Fall back to eauth
        return ''
def get_values_of_matching_keys(pattern_dict, user_name):
    '''
    Check a whitelist and/or blacklist to see if the value matches it.

    Collects the values of every entry in *pattern_dict* whose key
    pattern matches *user_name*.
    '''
    matched = []
    for pattern, values in pattern_dict.items():
        if salt.utils.stringutils.expr_match(user_name, pattern):
            matched.extend(values)
    return matched
# test code for the ConCache class
if __name__ == '__main__':
    # Manual smoke test: load the local master config and run the
    # connection-cache process in the foreground.
    opts = salt.config.master_config('/etc/salt/master')
    conc = ConnectedCache(opts)
    conc.start()
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Put this in your runners dir, under a directory called myutils, and | |
# call it "myutils/minions.py" | |
# -*- coding: utf-8 -*- | |
''' | |
This module contains routines used to verify the matcher against the minions | |
expected to return | |
''' | |
# Import python libs | |
from __future__ import absolute_import, unicode_literals | |
import os | |
import fnmatch | |
import re | |
import logging | |
# Import salt libs | |
import salt.payload | |
import salt.roster | |
import salt.utils.data | |
import salt.utils.files | |
import salt.utils.network | |
import salt.utils.stringutils | |
import salt.utils.versions | |
from salt.defaults import DEFAULT_TARGET_DELIM | |
from salt.exceptions import CommandExecutionError, SaltCacheError | |
import salt.auth.ldap | |
import salt.cache | |
from salt.ext import six | |
# Import 3rd-party libs | |
if six.PY3: | |
import ipaddress | |
else: | |
import salt.ext.ipaddress as ipaddress | |
HAS_RANGE = False | |
try: | |
import seco.range # pylint: disable=import-error | |
HAS_RANGE = True | |
except ImportError: | |
pass | |
# module-level logger
log = logging.getLogger(__name__)

# Splits a target expression such as 'G@os:Ubuntu' into its engine
# letter, optional per-engine delimiter and the pattern handed to that
# engine; used by parse_target() below.
TARGET_REX = re.compile(
    r'''(?x)
    (
        (?P<engine>G|P|I|J|L|N|S|E|R) # Possible target engines
        (?P<delimiter>(?<=G|P|I|J).)? # Optional delimiter for specific engines
        @)? # Engine+delimiter are separated by a '@'
            # character and are optional for the target
    (?P<pattern>.+)$''' # The pattern passed to the target engine
)
def parse_target(target_expression):
    '''
    Split *target_expression* into its ``engine``, ``delimiter`` and
    ``pattern`` components and return them as a dict.

    Expressions that do not match ``TARGET_REX`` are logged and returned
    with ``engine``/``delimiter`` set to ``None`` and the whole
    expression as the pattern.
    '''
    match = TARGET_REX.match(target_expression)
    if match:
        return match.groupdict()
    log.warning('Unable to parse target "%s"', target_expression)
    return {
        'engine': None,
        'delimiter': None,
        'pattern': target_expression,
    }
def get_minion_data(minion, opts):
    '''
    Get the grains/pillar for a specific minion.  If minion is None, it
    will return the grains/pillar for the first minion it finds.

    Return value is a tuple of the minion ID, grains, and pillar
    '''
    grains = None
    pillar = None
    if opts.get('minion_data_cache', False):
        cache = salt.cache.factory(opts)
        if minion is None:
            # Pre-seed 'data' so the check below cannot hit an unbound
            # local when the cache contains no minions at all.
            data = None
            for id_ in cache.list('minions'):
                data = cache.fetch('minions/{0}'.format(id_), 'data')
                if data is None:
                    continue
                # Bug fix: stop at the first minion with usable data, as
                # documented, instead of scanning on and keeping the last.
                break
        else:
            data = cache.fetch('minions/{0}'.format(minion), 'data')
        if data is not None:
            grains = data.get('grains', None)
            pillar = data.get('pillar', None)
    return minion if minion else None, grains, pillar
def nodegroup_comp(nodegroup, nodegroups, skip=None, first_call=True):
    '''
    Recursively expand ``nodegroup`` from ``nodegroups``; ignore nodegroups in ``skip``

    If a top-level (non-recursive) call finds no nodegroups, return the original
    nodegroup definition (for backwards compatibility). Keep track of recursive
    calls via `first_call` argument
    '''
    expanded_nodegroup = False
    # 'skip' tracks the expansion path to detect nodegroup cycles
    if skip is None:
        skip = set()
    elif nodegroup in skip:
        log.error('Failed nodegroup expansion: illegal nested nodegroup "%s"', nodegroup)
        return ''
    if nodegroup not in nodegroups:
        log.error('Failed nodegroup expansion: unknown nodegroup "%s"', nodegroup)
        return ''
    nglookup = nodegroups[nodegroup]
    # a nodegroup definition may be a whitespace-separated string or a
    # list/tuple of words; anything else is rejected
    if isinstance(nglookup, six.string_types):
        words = nglookup.split()
    elif isinstance(nglookup, (list, tuple)):
        words = nglookup
    else:
        log.error('Nodegroup \'%s\' (%s) is neither a string, list nor tuple',
                  nodegroup, nglookup)
        return ''
    skip.add(nodegroup)
    ret = []
    opers = ['and', 'or', 'not', '(', ')']
    for word in words:
        if not isinstance(word, six.string_types):
            word = six.text_type(word)
        if word in opers:
            ret.append(word)
        elif len(word) >= 3 and word.startswith('N@'):
            # nested nodegroup reference: expand it in place
            expanded_nodegroup = True
            ret.extend(nodegroup_comp(word[2:], nodegroups, skip=skip, first_call=False))
        else:
            ret.append(word)
    if ret:
        # parenthesize so the expansion stays one unit in a larger expression
        ret.insert(0, '(')
        ret.append(')')
    skip.remove(nodegroup)
    log.debug('nodegroup_comp(%s) => %s', nodegroup, ret)
    # Only return list form if a nodegroup was expanded. Otherwise return
    # the original string to conserve backwards compat
    if expanded_nodegroup or not first_call:
        return ret
    else:
        opers_set = set(opers)
        ret = words
        if (set(ret) - opers_set) == set(ret):
            # No compound operators found in nodegroup definition. Check for
            # group type specifiers
            group_type_re = re.compile('^[A-Z]@')
            # NOTE(review): '?''}])' is implicit string concatenation and
            # yields the single element '?}])' -- looks intentional upstream
            # but worth confirming.
            regex_chars = ['(', '[', '{', '\\', '?''}])']
            if not [x for x in ret if '*' in x or group_type_re.match(x)]:
                # No group type specifiers and no wildcards.
                # Treat this as an expression.
                if [x for x in ret if x in [x for y in regex_chars if y in x]]:
                    joined = 'E@' + ','.join(ret)
                    log.debug(
                        'Nodegroup \'%s\' (%s) detected as an expression. '
                        'Assuming compound matching syntax of \'%s\'',
                        nodegroup, ret, joined
                    )
                else:
                    # Treat this as a list of nodenames.
                    joined = 'L@' + ','.join(ret)
                    log.debug(
                        'Nodegroup \'%s\' (%s) detected as list of nodenames. '
                        'Assuming compound matching syntax of \'%s\'',
                        nodegroup, ret, joined
                    )
                # Return data must be a list of compound matching components
                # to be fed into compound matcher. Enclose return data in list.
                return [joined]
        log.debug(
            'No nested nodegroups detected. Using original nodegroup '
            'definition: %s', nodegroups[nodegroup]
        )
        return ret
class CkMinions(object): | |
''' | |
Used to check what minions should respond from a target | |
Note: This is a best-effort set of the minions that would match a target. | |
Depending on master configuration (grains caching, etc.) and topology (syndics) | |
the list may be a subset-- but we err on the side of too-many minions in this | |
class. | |
''' | |
def __init__(self, opts): | |
self.opts = opts | |
self.serial = salt.payload.Serial(opts) | |
self.cache = salt.cache.factory(opts) | |
# TODO: this is actually an *auth* check | |
if self.opts.get('transport', 'zeromq') in ('zeromq', 'tcp'): | |
self.acc = 'minions' | |
else: | |
self.acc = 'accepted' | |
def _check_nodegroup_minions(self, expr, greedy): # pylint: disable=unused-argument | |
''' | |
Return minions found by looking at nodegroups | |
''' | |
return self._check_compound_minions(nodegroup_comp(expr, self.opts['nodegroups']), | |
DEFAULT_TARGET_DELIM, | |
greedy) | |
def _check_glob_minions(self, expr, greedy): # pylint: disable=unused-argument | |
''' | |
Return the minions found by looking via globs | |
''' | |
return {'minions': fnmatch.filter(self._pki_minions(), expr), | |
'missing': []} | |
def _check_list_minions(self, expr, greedy): # pylint: disable=unused-argument | |
''' | |
Return the minions found by looking via a list | |
''' | |
if isinstance(expr, six.string_types): | |
expr = [m for m in expr.split(',') if m] | |
minions = self._pki_minions() | |
return {'minions': [x for x in expr if x in minions], | |
'missing': [x for x in expr if x not in minions]} | |
def _check_pcre_minions(self, expr, greedy): # pylint: disable=unused-argument | |
''' | |
Return the minions found by looking via regular expressions | |
''' | |
reg = re.compile(expr) | |
return {'minions': [m for m in self._pki_minions() if reg.match(m)], | |
'missing': []} | |
def _pki_minions(self): | |
''' | |
Retreive complete minion list from PKI dir. | |
Respects cache if configured | |
''' | |
minions = [] | |
pki_cache_fn = os.path.join(self.opts['pki_dir'], self.acc, '.key_cache') | |
try: | |
if self.opts['key_cache'] and os.path.exists(pki_cache_fn): | |
log.debug('Returning cached minion list') | |
with salt.utils.files.fopen(pki_cache_fn) as fn_: | |
return self.serial.load(fn_) | |
else: | |
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): | |
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): | |
minions.append(fn_) | |
return minions | |
except OSError as exc: | |
log.error( | |
'Encountered OSError while evaluating minions in PKI dir: %s', | |
exc | |
) | |
return minions | |
    def _check_cache_minions(self,
                             expr,
                             delimiter,
                             greedy,
                             search_type,
                             regex_match=False,
                             exact_match=False):
        '''
        Helper function to search for minions in the master caches.

        If 'greedy', return accepted minions that matched the condition
        or are absent from the cache.
        If not 'greedy', return only those minions that have cache data
        and matched the condition.

        ``search_type`` selects the cached data section searched
        (e.g. 'grains' or 'pillar').
        '''
        cache_enabled = self.opts.get('minion_data_cache', False)
        def list_cached_minions():
            return self.cache.list('minions')
        if greedy:
            # start from every accepted key in the PKI dir
            minions = []
            for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
                if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):
                    minions.append(fn_)
        elif cache_enabled:
            # non-greedy: only minions that have cache entries qualify
            minions = list_cached_minions()
        else:
            return {'minions': [],
                    'missing': []}
        if cache_enabled:
            if greedy:
                cminions = list_cached_minions()
            else:
                cminions = minions
            if not cminions:
                return {'minions': minions,
                        'missing': []}
            # work on a set so non-matching ids can be removed cheaply
            minions = set(minions)
            for id_ in cminions:
                if greedy and id_ not in minions:
                    continue
                mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
                if mdata is None:
                    # greedy keeps minions without cache data; non-greedy drops them
                    if not greedy:
                        minions.remove(id_)
                    continue
                search_results = mdata.get(search_type)
                if not salt.utils.data.subdict_match(search_results,
                                                     expr,
                                                     delimiter=delimiter,
                                                     regex_match=regex_match,
                                                     exact_match=exact_match):
                    minions.remove(id_)
            minions = list(minions)
        return {'minions': minions,
                'missing': []}
def _check_grain_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via grains | |
''' | |
return self._check_cache_minions(expr, delimiter, greedy, 'grains') | |
def _check_grain_pcre_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via grains with PCRE | |
''' | |
return self._check_cache_minions(expr, | |
delimiter, | |
greedy, | |
'grains', | |
regex_match=True) | |
def _check_pillar_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via pillar | |
''' | |
return self._check_cache_minions(expr, delimiter, greedy, 'pillar') | |
def _check_pillar_pcre_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via pillar with PCRE | |
''' | |
return self._check_cache_minions(expr, | |
delimiter, | |
greedy, | |
'pillar', | |
regex_match=True) | |
def _check_pillar_exact_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via pillar | |
''' | |
return self._check_cache_minions(expr, | |
delimiter, | |
greedy, | |
'pillar', | |
exact_match=True) | |
def _check_ipcidr_minions(self, expr, greedy): | |
''' | |
Return the minions found by looking via ipcidr | |
''' | |
cache_enabled = self.opts.get('minion_data_cache', False) | |
if greedy: | |
minions = self._pki_minions() | |
elif cache_enabled: | |
minions = self.cache.list('minions') | |
else: | |
return {'minions': [], | |
'missing': []} | |
if cache_enabled: | |
if greedy: | |
cminions = self.cache.list('minions') | |
else: | |
cminions = minions | |
if cminions is None: | |
return {'minions': minions, | |
'missing': []} | |
tgt = expr | |
try: | |
# Target is an address? | |
tgt = ipaddress.ip_address(tgt) | |
except: # pylint: disable=bare-except | |
try: | |
# Target is a network? | |
tgt = ipaddress.ip_network(tgt) | |
except: # pylint: disable=bare-except | |
log.error('Invalid IP/CIDR target: %s', tgt) | |
return {'minions': [], | |
'missing': []} | |
proto = 'ipv{0}'.format(tgt.version) | |
minions = set(minions) | |
for id_ in cminions: | |
mdata = self.cache.fetch('minions/{0}'.format(id_), 'data') | |
if mdata is None: | |
if not greedy: | |
minions.remove(id_) | |
continue | |
grains = mdata.get('grains') | |
if grains is None or proto not in grains: | |
match = False | |
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)): | |
match = six.text_type(tgt) in grains[proto] | |
else: | |
match = salt.utils.network.in_subnet(tgt, grains[proto]) | |
if not match and id_ in minions: | |
minions.remove(id_) | |
return {'minions': list(minions), | |
'missing': []} | |
    def _check_range_minions(self, expr, greedy):
        '''
        Return the minions found by looking via range expression

        Requires the optional ``seco.range`` library; raises
        CommandExecutionError when it is not importable.
        '''
        if not HAS_RANGE:
            raise CommandExecutionError(
                'Range matcher unavailable (unable to import seco.range, '
                'module most likely not installed)'
            )
        # lazily create and memoize the range client on the instance
        if not hasattr(self, '_range'):
            self._range = seco.range.Range(self.opts['range_server'])
        try:
            # NOTE(review): on success this returns the raw expansion from
            # seco.range, not the {'minions': ..., 'missing': ...} dict the
            # sibling matchers return -- confirm callers handle both shapes.
            return self._range.expand(expr)
        except seco.range.RangeException as exc:
            log.error(
                'Range exception in compound match: %s', exc
            )
            # fall back to listing all known minions when expansion fails
            cache_enabled = self.opts.get('minion_data_cache', False)
            if greedy:
                mlist = []
                for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
                    if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):
                        mlist.append(fn_)
                return {'minions': mlist,
                        'missing': []}
            elif cache_enabled:
                return {'minions': self.cache.list('minions'),
                        'missing': []}
            else:
                return {'minions': [],
                        'missing': []}
def _check_compound_pillar_exact_minions(self, expr, delimiter, greedy): | |
''' | |
Return the minions found by looking via compound matcher | |
Disable pillar glob matching | |
''' | |
return self._check_compound_minions(expr, | |
delimiter, | |
greedy, | |
pillar_exact=True) | |
    def _check_compound_minions(self,
                                expr,
                                delimiter,
                                greedy,
                                pillar_exact=False):  # pylint: disable=unused-argument
        '''
        Return the minions found by looking via compound matcher

        The expression is tokenized and rewritten into a Python set
        expression (engine matches become set literals, and/or/not become
        &/|/-) which is then evaluated with eval().
        '''
        if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)):
            log.error('Compound target that is neither string, list nor tuple')
            return {'minions': [], 'missing': []}
        minions = set(self._pki_minions())
        log.debug('minions: %s', minions)
        nodegroups = self.opts.get('nodegroups', {})
        if self.opts.get('minion_data_cache', False):
            # dispatch table: engine letter -> matcher method
            ref = {'G': self._check_grain_minions,
                   'P': self._check_grain_pcre_minions,
                   'I': self._check_pillar_minions,
                   'J': self._check_pillar_pcre_minions,
                   'L': self._check_list_minions,
                   'N': None,    # nodegroups should already be expanded
                   'S': self._check_ipcidr_minions,
                   'E': self._check_pcre_minions,
                   'R': self._all_minions}
            if pillar_exact:
                ref['I'] = self._check_pillar_exact_minions
                ref['J'] = self._check_pillar_exact_minions
            results = []
            unmatched = []
            opers = ['and', 'or', 'not', '(', ')']
            missing = []
            if isinstance(expr, six.string_types):
                words = expr.split()
            else:
                # we make a shallow copy in order to not affect the passed in arg
                words = expr[:]
            while words:
                word = words.pop(0)
                target_info = parse_target(word)
                # Easy check first
                if word in opers:
                    if results:
                        if results[-1] == '(' and word in ('and', 'or'):
                            log.error('Invalid beginning operator after "(": %s', word)
                            return {'minions': [], 'missing': []}
                        if word == 'not':
                            # 'not X' is rewritten as '& (all_minions - X)'
                            if not results[-1] in ('&', '|', '('):
                                results.append('&')
                            results.append('(')
                            results.append(six.text_type(set(minions)))
                            results.append('-')
                            unmatched.append('-')
                        elif word == 'and':
                            results.append('&')
                        elif word == 'or':
                            results.append('|')
                        elif word == '(':
                            results.append(word)
                            unmatched.append(word)
                        elif word == ')':
                            if not unmatched or unmatched[-1] != '(':
                                log.error('Invalid compound expr (unexpected '
                                          'right parenthesis): %s',
                                          expr)
                                return {'minions': [], 'missing': []}
                            results.append(word)
                            unmatched.pop()
                            if unmatched and unmatched[-1] == '-':
                                # close the parenthesis a 'not' opened
                                results.append(')')
                                unmatched.pop()
                        else:  # Won't get here, unless oper is added
                            log.error('Unhandled oper in compound expr: %s',
                                      expr)
                            return {'minions': [], 'missing': []}
                    else:
                        # seq start with oper, fail
                        if word == 'not':
                            results.append('(')
                            results.append(six.text_type(set(minions)))
                            results.append('-')
                            unmatched.append('-')
                        elif word == '(':
                            results.append(word)
                            unmatched.append(word)
                        else:
                            log.error(
                                'Expression may begin with'
                                ' binary operator: %s', word
                            )
                            return {'minions': [], 'missing': []}
                elif target_info and target_info['engine']:
                    if 'N' == target_info['engine']:
                        # if we encounter a node group, just evaluate it in-place
                        decomposed = nodegroup_comp(target_info['pattern'], nodegroups)
                        if decomposed:
                            words = decomposed + words
                        continue
                    engine = ref.get(target_info['engine'])
                    if not engine:
                        # If an unknown engine is called at any time, fail out
                        log.error(
                            'Unrecognized target engine "%s" for'
                            ' target expression "%s"',
                            target_info['engine'],
                            word,
                        )
                        return {'minions': [], 'missing': []}
                    engine_args = [target_info['pattern']]
                    if target_info['engine'] in ('G', 'P', 'I', 'J'):
                        engine_args.append(target_info['delimiter'] or ':')
                    engine_args.append(greedy)
                    _results = engine(*engine_args)
                    results.append(six.text_type(set(_results['minions'])))
                    missing.extend(_results['missing'])
                    if unmatched and unmatched[-1] == '-':
                        results.append(')')
                        unmatched.pop()
                else:
                    # The match is not explicitly defined, evaluate as a glob
                    _results = self._check_glob_minions(word, True)
                    results.append(six.text_type(set(_results['minions'])))
                    if unmatched and unmatched[-1] == '-':
                        results.append(')')
                        unmatched.pop()
            # Add a closing ')' for each item left in unmatched
            results.extend([')' for item in unmatched])
            results = ' '.join(results)
            log.debug('Evaluating final compound matching expr: %s',
                      results)
            try:
                # NOTE(review): eval() on an internally-built string of set
                # literals/operators; inputs reach here from targeting
                # expressions, so keep the tokenizer above strict.
                minions = list(eval(results))  # pylint: disable=W0123
                return {'minions': minions, 'missing': missing}
            except Exception:
                log.error('Invalid compound target: %s', expr)
                return {'minions': [], 'missing': []}
        return {'minions': list(minions),
                'missing': []}
    def connected_ids(self, subset=None, show_ip=False, show_ipv4=None, include_localhost=None):
        '''
        Return a set of all connected minion ids, optionally within a subset

        With ``show_ip`` the set contains ``(id, ip)`` tuples instead of
        bare ids. ``show_ipv4`` and ``include_localhost`` are deprecated
        no-ops kept for backwards compatibility.
        '''
        if include_localhost is not None:
            # NOTE(review): the concatenated warning strings below lack
            # separating spaces ('any' + 'connected') -- cosmetic only.
            salt.utils.versions.warn_until(
                'Sodium',
                'The \'include_localhost\' argument is no longer required; any'
                'connected localhost minion will always be included.'
            )
        if show_ipv4 is not None:
            salt.utils.versions.warn_until(
                'Sodium',
                'The \'show_ipv4\' argument has been renamed to \'show_ip\' as'
                'it now also includes IPv6 addresses for IPv6-connected'
                'minions.'
            )
        minions = set()
        if self.opts.get('minion_data_cache', False):
            search = self.cache.list('minions')
            if search is None:
                return minions
            # addresses with open connections to the master publish port
            addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port']))
            if '127.0.0.1' in addrs:
                # Add in the address of a possible locally-connected minion.
                addrs.discard('127.0.0.1')
                addrs.update(set(salt.utils.network.ip_addrs(include_loopback=False)))
            if '::1' in addrs:
                # Add in the address of a possible locally-connected minion.
                addrs.discard('::1')
                addrs.update(set(salt.utils.network.ip_addrs6(include_loopback=False)))
            if subset:
                search = subset
            for id_ in search:
                try:
                    mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
                except SaltCacheError:
                    # If a SaltCacheError is explicitly raised during the fetch operation,
                    # permission was denied to open the cached data.p file. Continue on as
                    # in the releases <= 2016.3. (An explicit error raise was added in PR
                    # #35388. See issue #36867 for more information.
                    continue
                if mdata is None:
                    continue
                grains = mdata.get('grains', {})
                # a minion counts as connected when any of its cached
                # addresses has an open connection to the publish port
                for ipv4 in grains.get('ipv4', []):
                    if ipv4 in addrs:
                        if show_ip:
                            minions.add((id_, ipv4))
                        else:
                            minions.add(id_)
                        break
                for ipv6 in grains.get('ipv6', []):
                    if ipv6 in addrs:
                        if show_ip:
                            minions.add((id_, ipv6))
                        else:
                            minions.add(id_)
                        break
        return minions
def _all_minions(self, expr=None): | |
''' | |
Return a list of all minions that have auth'd | |
''' | |
mlist = [] | |
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): | |
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): | |
mlist.append(fn_) | |
return {'minions': mlist, 'missing': []} | |
def check_minions(self, | |
expr, | |
tgt_type='glob', | |
delimiter=DEFAULT_TARGET_DELIM, | |
greedy=True): | |
''' | |
Check the passed regex against the available minions' public keys | |
stored for authentication. This should return a set of ids which | |
match the regex, this will then be used to parse the returns to | |
make sure everyone has checked back in. | |
''' | |
try: | |
if expr is None: | |
expr = '' | |
check_func = getattr(self, '_check_{0}_minions'.format(tgt_type), None) | |
if tgt_type in ('grain', | |
'grain_pcre', | |
'pillar', | |
'pillar_pcre', | |
'pillar_exact', | |
'compound', | |
'compound_pillar_exact'): | |
_res = check_func(expr, delimiter, greedy) | |
else: | |
_res = check_func(expr, greedy) | |
_res['ssh_minions'] = False | |
if self.opts.get('enable_ssh_minions', False) is True and isinstance('tgt', six.string_types): | |
roster = salt.roster.Roster(self.opts, self.opts.get('roster', 'flat')) | |
ssh_minions = roster.targets(expr, tgt_type) | |
if ssh_minions: | |
_res['minions'].extend(ssh_minions) | |
_res['ssh_minions'] = True | |
except Exception: | |
log.exception( | |
'Failed matching available minions with %s pattern: %s', | |
tgt_type, expr) | |
_res = {'minions': [], 'missing': []} | |
return _res | |
def _expand_matching(self, auth_entry): | |
ref = {'G': 'grain', | |
'P': 'grain_pcre', | |
'I': 'pillar', | |
'J': 'pillar_pcre', | |
'L': 'list', | |
'S': 'ipcidr', | |
'E': 'pcre', | |
'N': 'node', | |
None: 'glob'} | |
target_info = parse_target(auth_entry) | |
if not target_info: | |
log.error('Failed to parse valid target "%s"', auth_entry) | |
v_matcher = ref.get(target_info['engine']) | |
v_expr = target_info['pattern'] | |
_res = self.check_minions(v_expr, v_matcher) | |
return set(_res['minions']) | |
def validate_tgt(self, valid, expr, tgt_type, minions=None): | |
''' | |
Return a Bool. This function returns if the expression sent in is | |
within the scope of the valid expression | |
''' | |
v_minions = self._expand_matching(valid) | |
if minions is None: | |
_res = self.check_minions(expr, tgt_type) | |
minions = set(_res['minions']) | |
else: | |
minions = set(minions) | |
d_bool = not bool(minions.difference(v_minions)) | |
if len(v_minions) == len(minions) and d_bool: | |
return True | |
return d_bool | |
def match_check(self, regex, fun): | |
''' | |
Validate a single regex to function comparison, the function argument | |
can be a list of functions. It is all or nothing for a list of | |
functions | |
''' | |
vals = [] | |
if isinstance(fun, six.string_types): | |
fun = [fun] | |
for func in fun: | |
try: | |
if re.match(regex, func): | |
vals.append(True) | |
else: | |
vals.append(False) | |
except Exception: | |
log.error('Invalid regular expression: %s', regex) | |
return vals and all(vals) | |
def any_auth(self, form, auth_list, fun, arg, tgt=None, tgt_type='glob'): | |
''' | |
Read in the form and determine which auth check routine to execute | |
''' | |
# This function is only called from salt.auth.Authorize(), which is also | |
# deprecated and will be removed in Neon. | |
salt.utils.versions.warn_until( | |
'Neon', | |
'The \'any_auth\' function has been deprecated. Support for this ' | |
'function will be removed in Salt {version}.' | |
) | |
if form == 'publish': | |
return self.auth_check( | |
auth_list, | |
fun, | |
arg, | |
tgt, | |
tgt_type) | |
return self.spec_check( | |
auth_list, | |
fun, | |
arg, | |
form) | |
def auth_check_expanded(self,
                        auth_list,
                        funs,
                        args,
                        tgt,
                        tgt_type='glob',
                        groups=None,
                        publish_validate=False):
    '''
    Expanded-matching variant of :py:meth:`auth_check`: ACL keys that are
    themselves target expressions are expanded to concrete minion ids
    before deciding whether the requested functions are authorized on ALL
    targeted minions. Returns a bool.

    Enabled via the ``auth.enable_expanded_auth_matching`` option
    (see auth_check).
    '''
    # Here's my thinking
    # 1. Retrieve anticipated targeted minions
    # 2. Iterate through each entry in the auth_list
    # 3. If it is a minion_id, check to see if any targeted minions match.
    #    If there is a match, check to make sure funs are permitted
    #    (if it's not a match we don't care about this auth entry and can
    #     move on)
    #    a. If funs are permitted, Add this minion_id to a new set of allowed minion_ids
    #       If funs are NOT permitted, can short-circuit and return FALSE
    #    b. At the end of the auth_list loop, make sure all targeted IDs
    #       are in the set of allowed minion_ids.  If not, return FALSE
    # 4. If it is a target (glob, pillar, etc), retrieve matching minions
    #    and make sure that ALL targeted minions are in the set.
    #    then check to see if the funs are permitted
    #    a. If ALL targeted minions are not in the set, then return FALSE
    #    b. If the desired fun doesn't pass the auth check with any
    #       auth_entry's fun, then return FALSE
    # NOTE we are not going to try to allow functions to run on partial
    # sets of minions.  If a user targets a group of minions and does not
    # have access to run a job on ALL of these minions then the job will
    # fail with 'Eauth Failed'.
    # The recommended workflow in that case will be for the user to narrow
    # his target.
    # This should cover adding the AD LDAP lookup functionality while
    # preserving the existing auth behavior.
    # Recommend we config-get this behind an entry called
    # auth.enable_expanded_auth_matching
    # and default to False
    # Force pillar-based targeting to exact matching so pillar globs/PCRE
    # cannot be abused to widen the target beyond the exact match.
    v_tgt_type = tgt_type
    if tgt_type.lower() in ('pillar', 'pillar_pcre'):
        v_tgt_type = 'pillar_exact'
    elif tgt_type.lower() == 'compound':
        v_tgt_type = 'compound_pillar_exact'
    _res = self.check_minions(tgt, v_tgt_type)
    v_minions = set(_res['minions'])
    _res = self.check_minions(tgt, tgt_type)
    minions = set(_res['minions'])
    mismatch = bool(minions.difference(v_minions))
    # If the non-exact match gets more minions than the exact match
    # then pillar globbing or PCRE is being used, and we have a
    # problem
    if publish_validate:
        if mismatch:
            return False
    # compound commands will come in a list so treat everything as a list
    if not isinstance(funs, list):
        funs = [funs]
        args = [args]
    # Take the auth list and get all the minion names inside it
    allowed_minions = set()
    auth_dictionary = {}
    # Make a set, so we are guaranteed to have only one of each minion
    # Also iterate through the entire auth_list and create a dictionary
    # so it's easy to look up what functions are permitted
    for auth_list_entry in auth_list:
        if isinstance(auth_list_entry, six.string_types):
            for fun in funs:
                # represents toplevel auth entry is a function.
                # so this fn is permitted by all minions
                if self.match_check(auth_list_entry, fun):
                    return True
            continue
        if isinstance(auth_list_entry, dict):
            if len(auth_list_entry) != 1:
                log.info('Malformed ACL: %s', auth_list_entry)
                continue
            allowed_minions.update(set(auth_list_entry.keys()))
            for key in auth_list_entry:
                # Expand the ACL key (which may itself be a target
                # expression) into the concrete minion ids it covers.
                for match in self._expand_matching(key):
                    if match in auth_dictionary:
                        auth_dictionary[match].extend(auth_list_entry[key])
                    else:
                        # NOTE(review): stores a reference to the ACL entry
                        # list; a later extend mutates the shared list —
                        # presumably intentional upstream, confirm before
                        # changing.
                        auth_dictionary[match] = auth_list_entry[key]
    allowed_minions_from_auth_list = set()
    for next_entry in allowed_minions:
        allowed_minions_from_auth_list.update(self._expand_matching(next_entry))
    # 'minions' here are all the names of minions matched by the target
    # if we take out all the allowed minions, and there are any left, then
    # the target includes minions that are not allowed by eauth
    # so we can give up here.
    if len(minions - allowed_minions_from_auth_list) > 0:
        return False
    try:
        # Every targeted minion must have at least one ACL function
        # pattern matching the requested functions.
        for minion in minions:
            results = []
            for num, fun in enumerate(auth_dictionary[minion]):
                results.append(self.match_check(fun, funs))
            if not any(results):
                return False
        return True
    except TypeError:
        return False
    # Unreachable: the try block always returns; kept as a defensive
    # deny-by-default.
    return False
def auth_check(self,
               auth_list,
               funs,
               args,
               tgt,
               tgt_type='glob',
               groups=None,
               publish_validate=False,
               minions=None,
               whitelist=None):
    '''
    Returns a bool which defines if the requested function is authorized.
    Used to evaluate the standard structure under external master
    authentication interfaces, like eauth, peer, peer_run, etc.

    auth_list
        list of ACL entries (function-name patterns and/or
        ``{target: [fun_conditions]}`` dicts) granted to the caller
    funs / args
        requested function name(s) and their argument list(s)
    tgt / tgt_type
        the target expression the caller submitted and its matcher type
    whitelist
        function names that are always permitted
    '''
    # Delegate to the LDAP/AD-friendly expanded matcher when enabled.
    if self.opts.get('auth.enable_expanded_auth_matching', False):
        return self.auth_check_expanded(auth_list, funs, args, tgt, tgt_type, groups, publish_validate)
    if publish_validate:
        # Force pillar-based targeting to exact matching so pillar
        # globbing/PCRE cannot widen the target.
        v_tgt_type = tgt_type
        if tgt_type.lower() in ('pillar', 'pillar_pcre'):
            v_tgt_type = 'pillar_exact'
        elif tgt_type.lower() == 'compound':
            v_tgt_type = 'compound_pillar_exact'
        _res = self.check_minions(tgt, v_tgt_type)
        v_minions = set(_res['minions'])
        _res = self.check_minions(tgt, tgt_type)
        minions = set(_res['minions'])
        mismatch = bool(minions.difference(v_minions))
        # If the non-exact match gets more minions than the exact match
        # then pillar globbing or PCRE is being used, and we have a
        # problem
        if mismatch:
            return False
    # compound commands will come in a list so treat everything as a list
    if not isinstance(funs, list):
        funs = [funs]
        args = [args]
    try:
        for num, fun in enumerate(funs):
            if whitelist and fun in whitelist:
                return True
            for ind in auth_list:
                if isinstance(ind, six.string_types):
                    # Allowed for all minions
                    if self.match_check(ind, fun):
                        return True
                elif isinstance(ind, dict):
                    if len(ind) != 1:
                        # Invalid argument
                        continue
                    valid = next(six.iterkeys(ind))
                    # Check if minions are allowed
                    if self.validate_tgt(
                        valid,
                        tgt,
                        tgt_type,
                        minions=minions):
                        # Minions are allowed, verify function in allowed list
                        fun_args = args[num]
                        # A trailing dict flagged with '__kwarg__' carries
                        # the call's keyword arguments; split it off.
                        fun_kwargs = fun_args[-1] if fun_args else None
                        if isinstance(fun_kwargs, dict) and '__kwarg__' in fun_kwargs:
                            fun_args = list(fun_args)  # copy on modify
                            del fun_args[-1]
                        else:
                            fun_kwargs = None
                        if self.__fun_check(ind[valid], fun, fun_args, fun_kwargs):
                            return True
    except TypeError:
        # Malformed funs/args/auth_list structure: deny.
        return False
    # No ACL entry authorized the request: deny by default.
    return False
def fill_auth_list_from_groups(self, auth_provider, user_groups, auth_list):
    '''
    Returns a list of authorisation matchers that a user is eligible for.
    This list is a combination of the provided personal matchers plus the
    matchers of any group the user is in.
    '''
    # Group entries are keyed with a trailing '%'; pull in the matchers
    # of every group the user belongs to, in provider order.
    for entry in auth_provider:
        if not entry.endswith('%'):
            continue
        if entry.rstrip('%') in user_groups:
            auth_list.extend(auth_provider[entry])
    return auth_list
def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
    '''
    Returns a list of authorisation matchers that a user is eligible for.
    This list is a combination of the provided personal matchers plus the
    matchers of any group the user is in.
    '''
    auth_list = [] if auth_list is None else auth_list
    if permissive is None:
        permissive = self.opts.get('permissive_acl')
    matched_by_name = False
    for acl_key in auth_provider:
        # In strict (non-permissive) mode the '*' entry is only a
        # fallback, handled after the loop.
        if acl_key == '*' and not permissive:
            continue
        if acl_key.endswith('%'):
            # Group entry: applies when the user is in that group.
            if acl_key.rstrip('%') in groups:
                auth_list.extend(auth_provider[acl_key])
            continue
        # Per-user entry: glob-matched against the user name.
        if salt.utils.stringutils.expr_match(acl_key, name):
            matched_by_name = True
            auth_list.extend(auth_provider[acl_key])
    # Strict-mode fallback: apply '*' only when no user entry matched.
    if '*' in auth_provider and not permissive and not matched_by_name:
        auth_list.extend(auth_provider['*'])
    return auth_list
def wheel_check(self, auth_list, fun, args):
    '''
    Check special API permissions for a wheel call.
    '''
    form = 'wheel'
    return self.spec_check(auth_list, fun, args, form)
def runner_check(self, auth_list, fun, args):
    '''
    Check special API permissions for a runner call.
    '''
    form = 'runner'
    return self.spec_check(auth_list, fun, args, form)
def spec_check(self, auth_list, fun, args, form):
    '''
    Check special API permissions (wheel/runner/cloud forms).

    auth_list
        ACL entries: '@mod'/'@form' strings, or dicts mapping such a
        key to a list of function/argument conditions
    fun
        requested function, ``module.function`` unless form == 'cloud'
    args
        dict with optional 'arg' (list) and 'kwarg' (dict) members
    form
        one of 'wheel', 'runner', 'cloud', ...

    Returns True/False, or an error dict for a malformed ``fun``.
    '''
    if not auth_list:
        return False
    if form != 'cloud':
        comps = fun.split('.')
        if len(comps) != 2:
            # Hint at a syntax error when command is passed improperly,
            # rather than returning an authentication error of some kind.
            # See Issue #21969 for more information.
            return {'error': {'name': 'SaltInvocationError',
                              'message': 'A command invocation error occurred: Check syntax.'}}
        mod_name = comps[0]
        fun_name = comps[1]
    else:
        fun_name = mod_name = fun
    for ind in auth_list:
        if isinstance(ind, six.string_types):
            # startswith() instead of ind[0] so an empty-string ACL entry
            # is simply skipped instead of raising IndexError.
            if ind.startswith('@'):
                if ind[1:] == mod_name or ind[1:] == form or ind == '@{0}s'.format(form):
                    return True
        elif isinstance(ind, dict):
            if len(ind) != 1:
                continue
            valid = next(six.iterkeys(ind))
            if valid.startswith('@'):
                # Module-scoped entry: conditions checked against the bare
                # function name.
                if valid[1:] == mod_name:
                    if self.__fun_check(ind[valid], fun_name, args.get('arg'), args.get('kwarg')):
                        return True
                # Form-scoped entry: conditions checked against the full
                # 'module.function' name.
                if valid[1:] == form or valid == '@{0}s'.format(form):
                    if self.__fun_check(ind[valid], fun, args.get('arg'), args.get('kwarg')):
                        return True
    return False
def __fun_check(self, valid, fun, args=None, kwargs=None):
    '''
    Check the given function name (fun) and its arguments (args) against the list of conditions.
    '''
    conditions = valid if isinstance(valid, list) else [valid]
    for cond in conditions:
        if isinstance(cond, six.string_types):
            # Bare string: a regex matched against the function name only.
            if self.match_check(cond, fun):
                return True
        elif isinstance(cond, dict):
            if len(cond) != 1:
                # Invalid argument
                continue
            # Single-key dict: key is the function-name pattern, value
            # constrains the allowed args/kwargs.
            fname_cond = next(six.iterkeys(cond))
            if self.match_check(fname_cond, fun) and self.__args_check(cond[fname_cond], args, kwargs):
                return True
    return False
def __args_check(self, valid, args=None, kwargs=None):
    '''
    valid is a dicts: {'args': [...], 'kwargs': {...}} or a list of such dicts.
    Returns True if any condition dict is satisfied by the given call.
    '''
    conditions = valid if isinstance(valid, list) else [valid]
    for cond in conditions:
        if not isinstance(cond, dict):
            # Invalid argument
            continue
        # Positional whitelist: each listed pattern must match the call
        # argument in the same slot; a None pattern allows anything.
        cond_args = cond.get('args', [])
        if args is None and cond_args:
            continue
        ok = True
        for idx, pattern in enumerate(cond_args):
            if len(args) <= idx:
                ok = False
                break
            if pattern is not None and not self.match_check(pattern, six.text_type(args[idx])):
                ok = False
                break
        if not ok:
            continue
        # Keyword whitelist: every listed key must be present and (unless
        # the pattern is None) its value must match.
        cond_kwargs = cond.get('kwargs', {})
        for key, pattern in six.iteritems(cond_kwargs):
            if kwargs is None or key not in kwargs:
                ok = False
                break
            if pattern is not None and not self.match_check(pattern, six.text_type(kwargs[key])):
                ok = False
                break
        if ok:
            return True
    return False
def mine_get(tgt, fun, tgt_type='glob', opts=None):
    '''
    Gathers the data from the specified minions' mine, pass in the target,
    function to look up and the target type

    tgt
        target expression selecting the minions
    fun
        name of the mine function whose cached data is wanted
    tgt_type
        matcher used to interpret ``tgt`` (default: glob)
    opts
        master configuration dictionary

    Returns a dict mapping each matched minion id to its cached mine data
    for ``fun``; minions with no cached entry for ``fun`` are omitted.
    '''
    ret = {}
    # NOTE: the old 'serial = salt.payload.Serial(opts)' was never used —
    # the cache interface handles (de)serialization itself.
    checker = CkMinions(opts)
    _res = checker.check_minions(
        tgt,
        tgt_type)
    minions = _res['minions']
    cache = salt.cache.factory(opts)
    for minion in minions:
        mdata = cache.fetch('minions/{0}'.format(minion), 'mine')
        if mdata is None:
            continue
        fdata = mdata.get(fun)
        if fdata:
            ret[minion] = fdata
    return ret
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment