Created
January 8, 2013 21:43
-
-
Save ewindisch/4488269 to your computer and use it in GitHub Desktop.
openstack common (oslo) log.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# vim: tabstop=4 shiftwidth=4 softtabstop=4 | |
# Copyright 2011 OpenStack LLC. | |
# Copyright 2010 United States Government as represented by the | |
# Administrator of the National Aeronautics and Space Administration. | |
# All Rights Reserved. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); you may | |
# not use this file except in compliance with the License. You may obtain | |
# a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |
# License for the specific language governing permissions and limitations | |
# under the License. | |
"""Openstack logging handler. | |
This module adds to logging functionality by adding the option to specify | |
a context object when calling the various log methods. If the context object | |
is not specified, default formatting is used. Additionally, an instance uuid | |
may be passed as part of the log message, which is intended to make it easier | |
for admins to find messages related to a specific instance. | |
It also allows setting of formatting information through conf. | |
""" | |
import cStringIO | |
import inspect | |
import itertools | |
import logging | |
import logging.config | |
import logging.handlers | |
import os | |
import Queue | |
import stat | |
import sys | |
import traceback | |
from openstack.common import cfg | |
from openstack.common.gettextutils import _ | |
from openstack.common import jsonutils | |
from openstack.common import local | |
from openstack.common import notifier | |
import eventlet | |
# Options controlling log-record formatting and log behaviour; registered on
# the global CONF object below.
log_opts = [
    cfg.BoolOpt('logging_async',
                default=False,
                help='asynchronously write logs in a background thread'),
    cfg.IntOpt('logging_async_backlog',
               default=0,
               help='Number of messages to backlog. '
                    'Value of 0 is unlimited. Falls back to synchronous '
                    'operation if backlog is full.'),
    # Format used when a request context is attached to the record.
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
                       '%(user_id)s %(project_id)s] %(instance)s'
                       '%(message)s',
               help='format string to use for log messages with context'),
    # Format used when no request context is available.
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
                       ' %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
               help='prefix each line of exception output with this format'),
    # Per-module level overrides, applied in _setup_logging_from_conf().
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
# Options controlling where logs go (file/dir/stderr) and file permissions.
generic_log_opts = [
    cfg.StrOpt('logdir',
               default=None,
               help='Log output to a per-service log file in named directory'),
    cfg.StrOpt('logfile',
               default=None,
               help='Log output to a named file'),
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error'),
    # Octal string, parsed with int(value, 8) in _setup_logging_from_conf().
    cfg.StrOpt('logfile_mode',
               default='0644',
               help='Default file mode used when creating log files'),
]
# Global configuration object; both option groups are registered at import
# time so they are available before setup() runs.
CONF = cfg.CONF
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal backport for Python < 2.7: a handler that discards every
    # record and never takes a lock.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # Nothing is ever written, so no lock is needed.
            self.lock = None
def _dictify_context(context):
    """Return *context* as a mapping.

    None passes through unchanged; non-dict objects exposing a to_dict()
    hook are converted; plain dicts are returned as-is.
    """
    if context is None:
        return None
    converter = getattr(context, 'to_dict', None)
    if converter and not isinstance(context, dict):
        context = converter()
    return context
def _get_binary_name():
    # The outermost stack frame belongs to the script that started the
    # interpreter; its basename is used as the default log file name.
    return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the configured log file path.

    Returns None (implicitly) when neither a log file nor a log directory
    is configured.  Both the new (log_file/log_dir) and legacy
    (logfile/logdir) option names are honoured, new names winning.
    """
    logfile = CONF.log_file or CONF.logfile
    logdir = CONF.log_dir or CONF.logdir

    if logfile:
        # A bare file name is used as-is; with a directory it is joined.
        return os.path.join(logdir, logfile) if logdir else logfile

    if logdir:
        # Directory only: derive a per-service file name from the binary.
        name = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, name),)
class ContextAdapter(logging.LoggerAdapter):
    """LoggerAdapter that injects request-context and instance data.

    Log calls may pass a ``context`` keyword (otherwise one is picked up
    from the thread-local ``local.store``) plus an ``instance`` or
    ``instance_uuid`` keyword; all of it is exposed to formatters through
    the record's ``extra`` mapping.
    """

    # Accept .warn() as an alias for .warning().
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        # LoggerAdapter.__init__ is not called, so self.extra is never
        # set; process() below does not use it.
        self.logger = logger
        self.project = project_name
        self.version = version_string

    def audit(self, msg, *args, **kwargs):
        """Log *msg* at the synthesized AUDIT level (INFO + 1)."""
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def deprecated(self, msg, *args, **kwargs):
        """Warn about deprecated config usage; fatal when
        CONF.fatal_deprecations is set (raises DeprecatedConfig)."""
        stdmsg = _("Deprecated Config: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Fold context/instance information into kwargs['extra'].

        Pops 'context', 'instance' and 'instance_uuid' out of kwargs so
        the underlying logger never sees them, and mirrors the final
        mapping under extra['extra'] (consumed by JSONFormatter).
        """
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            # Fall back to the thread-local request context, if any.
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        else:
            instance_uuid = kwargs.pop('instance_uuid', None)
            if instance_uuid:
                instance_extra = (CONF.instance_uuid_format
                                  % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})

        extra.update({"project": self.project})
        extra.update({"version": self.version})

        # Snapshot the whole mapping so formatters can see it as one value.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that renders each log record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback for *ei* as a list of strings.

        With strip_newlines (the default) each traceback entry is split
        into its individual non-empty lines; otherwise the raw
        traceback.format_exception() strings (with embedded newlines)
        are returned.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # BUG FIX: itertools.ifilter only exists on Python 2; an
            # equivalent comprehension works on both Python 2 and 3.
            lines = [[l for l in line.rstrip().splitlines() if l]
                     for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Serialize *record* (and any ContextAdapter 'extra' payload)
        to a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        # ContextAdapter.process() mirrors its mapping under record.extra.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
class PublishErrorsHandler(logging.Handler):
    """Handler that republishes error records through the notifier API."""

    def emit(self, record):
        # When the log_notifier driver is active, publishing here would
        # feed the notification straight back into logging; bail out.
        if ('openstack.common.notifier.log_notifier' in
                CONF.notification_driver):
            return
        payload = dict(error=record.msg)
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            payload)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions.

    The hook logs the exception at CRITICAL on *product_name*'s logger,
    attaching the full traceback only when CONF.verbose is set.
    """
    # NOTE: the parameter previously named 'type' shadowed the builtin;
    # sys.excepthook invokes the hook positionally, so renaming is safe.
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose:
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook
def setup(product_name):
    """Setup logging."""
    sys.excepthook = _create_logging_excepthook(product_name)

    # The ThreadedLogger class provides a background greenthread to
    # asynchronously (but serially) process log messages.
    if CONF.logging_async:
        logging.setLoggerClass(ThreadedLogger)

    if not CONF.log_config:
        _setup_logging_from_conf(product_name)
        return

    # A logging config file takes precedence over our own CONF options.
    try:
        logging.config.fileConfig(CONF.log_config)
    except Exception:
        traceback.print_exc()
        raise
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value.

    Accepts either a SysLogHandler constant name (e.g. 'LOG_USER') or a
    facility_names key (e.g. 'user').  Raises TypeError listing the valid
    choices when the configured value matches neither.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        # BUG FIX: 'LOG_AUTH' appeared twice in this list; the duplicate
        # has been removed so the error message lists each choice once.
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
def _setup_logging_from_conf(product_name):
    """Configure *product_name*'s root logger from CONF.

    Installs syslog/file/stream handlers as configured, sets formatters,
    chooses the root level, and applies per-module level overrides from
    CONF.default_log_levels.
    """
    log_root = getLogger(product_name).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

        # CONF.logfile_mode is an octal string, e.g. '0644'.
        mode = int(CONF.logfile_mode, 8)
        st = os.stat(logpath)
        if st.st_mode != (stat.S_IFREG | mode):
            os.chmod(logpath, mode)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        log_root.addHandler(PublishErrorsHandler(logging.ERROR))

    for handler in log_root.handlers:
        datefmt = CONF.log_date_format
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
        else:
            # BUG FIX: the LegacyFormatter was previously set
            # unconditionally, clobbering the CONF.log_format formatter
            # assigned just above.  It is now only the fallback.
            handler.setFormatter(LegacyFormatter(datefmt=datefmt))

    if CONF.verbose or CONF.debug:
        log_root.setLevel(logging.DEBUG)
    else:
        log_root.setLevel(logging.INFO)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
        # Attach the root handlers so overridden modules still emit through
        # the configured outputs.
        for handler in log_root.handlers:
            logger.addHandler(handler)
# Cache of ContextAdapter instances handed out by getLogger(), keyed by name.
_loggers = {}
# Shared queue for ThreadedLogger; created lazily by the first instance.
_threadedlogger_queue = None
def _threadedlogger_proc(self):
    """Worker loop: pull queued (handler, args) pairs and run them serially.

    Runs forever in a background greenthread spawned by ThreadedLogger.
    """
    while True:
        # BUG FIX: pre-bind so the error path below cannot hit an
        # UnboundLocalError when queue.get() itself raised.
        handle = args = None
        try:
            handle, args = _threadedlogger_queue.get()
            handle(*args)
            _threadedlogger_queue.task_done()
        except Exception:
            # Don't let the logger die.
            # Generate a log message manually,
            # since we can't directly use the
            # standard logging methods here.
            # TODO(ewindisch): write a test for this.
            try:
                fn, lno, func = self.findCaller()
            except ValueError:
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
            try:
                msg = _("Error in writing log message.")
                r = getLogger().makeRecord('ThreadedLogger', logging.ERROR,
                                           fn, lno, msg, [], sys.exc_info(),
                                           func, None)
                if handle is not None and args:
                    # args[0] is the logger instance the call was queued for.
                    handle(args[0], r)
                else:
                    # get() failed before anything was dequeued; report via
                    # this logger directly.
                    logging.Logger.handle(self, r)
                return
            except Exception:
                # If THIS happens, logging is really busted.
                # ... just dump something to stderr.
                # BUG FIX: traceback objects have no print_tb() method;
                # use the traceback module instead.
                traceback.print_exc(file=sys.stderr)
def _threadedlogger_asynclog(fn):
    """Decorator: queue a log call for the background worker instead of
    executing it inline.

    BUG FIX: the bound instance must be queued along with the positional
    arguments -- _threadedlogger_proc invokes the stored function as
    handle(*args) and its error path reads args[0] as the logger
    instance, so dropping self passed the record where the instance was
    expected.
    """
    def queue_func(self, *args):
        _threadedlogger_queue.put([fn, (self,) + args])
    return queue_func
class ThreadedLogger(logging.Logger):
    """ThreadedLogger: serialized, asynchronous logging.

    Using ThreadedLogger will not block on logging in eventlet,
    but will keep logs in order by serializing them through an
    in-memory queue.
    """

    def __init__(self, name):
        # Use a global queue because instance-level
        # decorators aren't easy (and overkill).
        # Also, this might be preferable for logging.
        global _threadedlogger_queue
        if not _threadedlogger_queue:
            _threadedlogger_queue = Queue.Queue(CONF.logging_async_backlog)
            eventlet.spawn_n(_threadedlogger_proc, self)
        logging.Logger.__init__(self, name)

    @_threadedlogger_asynclog
    def handle(self, record):
        # BUG FIX: was 'super().handle(self, record)' -- zero-argument
        # super() is Python 3 syntax (this is a Python 2 module) and it
        # also passed self twice, which is a TypeError even on Python 3.
        logging.Logger.handle(self, record)
def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for *name*, creating it on first use."""
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs.

    Lets a logger stand in anywhere a writable file-like object is
    expected; each write() becomes one record at the configured level.
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Delegate straight to the wrapped logger at our level.
        self.logger.log(self.level, msg)
class LegacyFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter
    see: http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Pick the format per record: context format when the record
        # carries a request_id, default format otherwise.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            # Without a record there is nothing to interpolate the prefix
            # with; fall back to stock formatting.
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # %(asctime)s is normally only set by Formatter.format(); compute
        # it here when the prefix references it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prefix every traceback line so multi-line tracebacks remain
        # attributable in aggregated logs.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as %(color)s.

    The code is chosen by record level; the configured format string
    decides whether it is actually used.  Levels missing from
    LEVEL_COLORS raise KeyError in format().
    """

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Attach the level's color so format strings may reference it.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated() when deprecations are fatal."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # BUG FIX: super() must be given the derived class, not Exception;
        # super(Exception, self) starts the MRO search *above* Exception
        # and skipped its __init__ entirely.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
def close():
    """Close the logging system.

    Stops accepting messages and flushes.
    """
    # BUG FIX: this function assigns _threadedlogger_queue below, so
    # without a global declaration the first read raised
    # UnboundLocalError.
    global _threadedlogger_queue

    if _threadedlogger_queue:
        # Flush messages.
        _threadedlogger_queue.join()
        # Reset to None, so ThreadedLogger might be reinitialized.
        _threadedlogger_queue = None

    # BUG FIX: ContextAdapter has no shutdown() method, and replacing
    # cached adapters with None made later getLogger() calls return None
    # (the cache is keyed by presence, not value).  Flush and close all
    # handlers via the logging module, then drop the cache so getLogger()
    # can reinitialize adapters.
    logging.shutdown()
    _loggers.clear()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment