Output of the new DB entries

This commit is contained in:
hubobel 2022-01-02 21:50:48 +01:00
parent bad48e1627
commit cfbbb9ee3d
2399 changed files with 843193 additions and 43 deletions

View file

@@ -0,0 +1,130 @@
# -*- test-case-name: twisted.logger.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Logger: Classes and functions to do granular logging.
Example usage in a module C{some.module}::
from twisted.logger import Logger
log = Logger()
def handleData(data):
log.debug("Got data: {data!r}.", data=data)
Or in a class::
from twisted.logger import Logger
class Foo(object):
log = Logger()
def oops(self, data):
self.log.error("Oops! Invalid data from server: {data!r}",
data=data)
C{Logger}s have namespaces, for which logging can be configured independently.
Namespaces may be specified by passing in a C{namespace} argument to L{Logger}
when instantiating it, but if none is given, the logger will derive its own
namespace by using the module name of the callable that instantiated it, or, in
the case of a class, by using the fully qualified name of the class.
In the first example above, the namespace would be C{some.module}, and in the
second example, it would be C{some.module.Foo}.
@var globalLogPublisher: The L{LogPublisher} that all L{Logger} instances that
are not otherwise parameterized will point to by default.
@type globalLogPublisher: L{LogPublisher}
@var globalLogBeginner: The L{LogBeginner} used to activate the main log
observer, whether it's a log file, or an observer pointing at stderr.
@type globalLogBeginner: L{LogBeginner}
"""
__all__ = [
# From ._levels
"InvalidLogLevelError", "LogLevel",
# From ._format
"formatEvent", "formatEventAsClassicLogText",
"formatTime", "timeFormatRFC3339",
"eventAsText",
# From ._flatten
"extractField",
# From ._logger
"Logger", "_loggerFor",
# From ._observer
"ILogObserver", "LogPublisher",
# From ._buffer
"LimitedHistoryLogObserver",
# From ._file
"FileLogObserver", "textFileLogObserver",
# From ._filter
"PredicateResult", "ILogFilterPredicate",
"FilteringLogObserver", "LogLevelFilterPredicate",
# From ._stdlib
"STDLibLogObserver",
# From ._io
"LoggingFile",
# From ._legacy
"LegacyLogObserverWrapper",
# From ._global
"globalLogPublisher", "globalLogBeginner", "LogBeginner",
# From ._json
"eventAsJSON", "eventFromJSON",
"jsonFileLogObserver", "eventsFromJSONLogFile",
# From ._capture
"capturedLogs",
]
from ._levels import InvalidLogLevelError, LogLevel
from ._flatten import extractField
from ._format import (
formatEvent, formatEventAsClassicLogText, formatTime, timeFormatRFC3339,
eventAsText
)
from ._logger import Logger, _loggerFor
from ._observer import ILogObserver, LogPublisher
from ._buffer import LimitedHistoryLogObserver
from ._file import FileLogObserver, textFileLogObserver
from ._filter import (
PredicateResult, ILogFilterPredicate, FilteringLogObserver,
LogLevelFilterPredicate
)
from ._stdlib import STDLibLogObserver
from ._io import LoggingFile
from ._legacy import LegacyLogObserverWrapper
from ._global import (
globalLogPublisher, globalLogBeginner, LogBeginner
)
from ._json import (
eventAsJSON, eventFromJSON,
jsonFileLogObserver, eventsFromJSONLogFile
)
from ._capture import capturedLogs
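A minimal usage sketch (editor's note, not part of the committed file above) showing how the names exported here fit together; it assumes the package is importable as twisted.logger::

    # Sketch only; the namespace and message are illustrative.
    import sys
    from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

    # Activate logging once at startup: every Logger that uses the global
    # publisher will now write classic-format lines to stdout.
    globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])

    log = Logger(namespace="example")
    log.info("Hello, {name}!", name="world")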

View file

@@ -0,0 +1,59 @@
# -*- test-case-name: twisted.logger.test.test_buffer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Log observer that maintains a buffer.
"""
from collections import deque
from zope.interface import implementer
from ._observer import ILogObserver
_DEFAULT_BUFFER_MAXIMUM = 64 * 1024
@implementer(ILogObserver)
class LimitedHistoryLogObserver(object):
"""
L{ILogObserver} that stores events in a buffer of a fixed size::
>>> from twisted.logger import LimitedHistoryLogObserver
>>> history = LimitedHistoryLogObserver(5)
>>> for n in range(10): history({'n': n})
...
>>> repeats = []
>>> history.replayTo(repeats.append)
>>> len(repeats)
5
>>> repeats
[{'n': 5}, {'n': 6}, {'n': 7}, {'n': 8}, {'n': 9}]
>>>
"""
def __init__(self, size=_DEFAULT_BUFFER_MAXIMUM):
"""
@param size: The maximum number of events to buffer. If L{None}, the
buffer is unbounded.
@type size: L{int}
"""
self._buffer = deque(maxlen=size)
def __call__(self, event):
self._buffer.append(event)
def replayTo(self, otherObserver):
"""
Re-play the buffered events to another log observer.
@param otherObserver: An observer to replay events to.
@type otherObserver: L{ILogObserver}
"""
for event in self._buffer:
otherObserver(event)
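One way the buffer above is typically used (sketch only, not part of the file): collect events before any real observer exists, then replay them once one is available::

    # Sketch only; the event dict and observer are illustrative.
    import sys
    from twisted.logger import LimitedHistoryLogObserver, textFileLogObserver

    history = LimitedHistoryLogObserver(size=100)
    history({"log_format": u"early event", "log_time": 0})  # buffered only

    # Later, once a real observer is configured, hand the buffered events over.
    history.replayTo(textFileLogObserver(sys.stdout))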

View file

@@ -0,0 +1,24 @@
# -*- test-case-name: twisted.logger.test.test_capture -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Context manager for capturing logs.
"""
from contextlib import contextmanager
from twisted.logger import globalLogPublisher
@contextmanager
def capturedLogs():
events = []
observer = events.append
globalLogPublisher.addObserver(observer)
yield events
globalLogPublisher.removeObserver(observer)
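A short illustration of the context manager above, as one might write it in a test (sketch only)::

    # Sketch only; the namespace and logged value are illustrative.
    from twisted.logger import Logger, capturedLogs

    log = Logger(namespace="example")

    with capturedLogs() as captured:
        log.info("something happened: {value}", value=42)

    # Each captured item is the raw event dict, so keyword arguments passed
    # to log.info() are available directly.
    assert captured[0]["value"] == 42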

View file

@@ -0,0 +1,86 @@
# -*- test-case-name: twisted.logger.test.test_file -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
File log observer.
"""
from zope.interface import implementer
from twisted.python.compat import ioType, unicode
from ._observer import ILogObserver
from ._format import formatTime
from ._format import timeFormatRFC3339
from ._format import formatEventAsClassicLogText
@implementer(ILogObserver)
class FileLogObserver(object):
"""
Log observer that writes to a file-like object.
"""
def __init__(self, outFile, formatEvent):
"""
@param outFile: A file-like object. Ideally one should be passed which
accepts L{unicode} data. Otherwise, UTF-8 L{bytes} will be used.
@type outFile: L{io.IOBase}
@param formatEvent: A callable that formats an event.
@type formatEvent: L{callable} that takes an C{event} argument and
returns a formatted event as L{unicode}.
"""
if ioType(outFile) is not unicode:
self._encoding = "utf-8"
else:
self._encoding = None
self._outFile = outFile
self.formatEvent = formatEvent
def __call__(self, event):
"""
Write event to file.
@param event: An event.
@type event: L{dict}
"""
text = self.formatEvent(event)
if text is None:
text = u""
if self._encoding is not None:
text = text.encode(self._encoding)
if text:
self._outFile.write(text)
self._outFile.flush()
def textFileLogObserver(outFile, timeFormat=timeFormatRFC3339):
"""
Create a L{FileLogObserver} that emits text to a specified (writable)
file-like object.
@param outFile: A file-like object. Ideally one should be passed which
accepts L{unicode} data. Otherwise, UTF-8 L{bytes} will be used.
@type outFile: L{io.IOBase}
@param timeFormat: The format to use when adding timestamp prefixes to
logged events. If L{None}, or for events with no C{"log_timestamp"}
key, the default timestamp prefix of C{u"-"} is used.
@type timeFormat: L{unicode} or L{None}
@return: A file log observer.
@rtype: L{FileLogObserver}
"""
def formatEvent(event):
return formatEventAsClassicLogText(
event, formatTime=lambda e: formatTime(e, timeFormat)
)
return FileLogObserver(outFile, formatEvent)
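A sketch of the observer defined above writing classic-format lines to a text-mode file; the path and time format are illustrative::

    # Sketch only; "application.log" is an illustrative path.
    import io
    from twisted.logger import Logger, textFileLogObserver

    logFile = io.open("application.log", "a", encoding="utf-8")
    observer = textFileLogObserver(logFile, timeFormat=u"%Y-%m-%d %H:%M:%S")

    log = Logger(namespace="example", observer=observer)
    log.warn("disk usage at {percent}%", percent=91)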

View file

@@ -0,0 +1,231 @@
# -*- test-case-name: twisted.logger.test.test_filter -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Filtering log observer.
"""
from functools import partial
from zope.interface import Interface, implementer
from constantly import NamedConstant, Names
from ._levels import InvalidLogLevelError, LogLevel
from ._observer import ILogObserver
class PredicateResult(Names):
"""
Predicate results.
@see: L{LogLevelFilterPredicate}
@cvar yes: Log the specified event. When this value is used,
L{FilteringLogObserver} will always log the message, without
evaluating other predicates.
@cvar no: Do not log the specified event. When this value is used,
L{FilteringLogObserver} will I{not} log the message, without
evaluating other predicates.
@cvar maybe: Do not have an opinion on the event. When this value is used,
L{FilteringLogObserver} will consider subsequent predicate results;
if returned by the last predicate being considered, then the event will
be logged.
"""
yes = NamedConstant()
no = NamedConstant()
maybe = NamedConstant()
class ILogFilterPredicate(Interface):
"""
A predicate that determines whether an event should be logged.
"""
def __call__(event):
"""
Determine whether an event should be logged.
@returns: a L{PredicateResult}.
"""
def shouldLogEvent(predicates, event):
"""
Determine whether an event should be logged, based on the result of
C{predicates}.
By default, the result is C{True}; so if there are no predicates,
everything will be logged.
If any predicate returns C{yes}, then we will immediately return C{True}.
If any predicate returns C{no}, then we will immediately return C{False}.
As predicates return C{maybe}, we keep calling the next predicate until we
run out, at which point we return C{True}.
@param predicates: The predicates to use.
@type predicates: iterable of L{ILogFilterPredicate}
@param event: An event
@type event: L{dict}
@return: True if the message should be forwarded on, C{False} if not.
@rtype: L{bool}
"""
for predicate in predicates:
result = predicate(event)
if result == PredicateResult.yes:
return True
if result == PredicateResult.no:
return False
if result == PredicateResult.maybe:
continue
raise TypeError("Invalid predicate result: {0!r}".format(result))
return True
@implementer(ILogObserver)
class FilteringLogObserver(object):
"""
L{ILogObserver} that wraps another L{ILogObserver}, but filters out events
based on applying a series of L{ILogFilterPredicate}s.
"""
def __init__(
self, observer, predicates,
negativeObserver=lambda event: None
):
"""
@param observer: An observer to which this observer will forward
events when C{predicates} yield a positive result.
@type observer: L{ILogObserver}
@param predicates: Predicates to apply to events before forwarding to
the wrapped observer.
@type predicates: ordered iterable of predicates
@param negativeObserver: An observer to which this observer will
forward events when C{predicates} yield a negative result.
@type negativeObserver: L{ILogObserver}
"""
self._observer = observer
self._shouldLogEvent = partial(shouldLogEvent, list(predicates))
self._negativeObserver = negativeObserver
def __call__(self, event):
"""
Forward to next observer if predicate allows it.
"""
if self._shouldLogEvent(event):
if "log_trace" in event:
event["log_trace"].append((self, self._observer))
self._observer(event)
else:
self._negativeObserver(event)
@implementer(ILogFilterPredicate)
class LogLevelFilterPredicate(object):
"""
L{ILogFilterPredicate} that filters out events with a log level lower than
the log level for the event's namespace.
Events that do not have a log level or namespace are also dropped.
"""
def __init__(self, defaultLogLevel=LogLevel.info):
"""
@param defaultLogLevel: The default minimum log level.
@type defaultLogLevel: L{LogLevel}
"""
self._logLevelsByNamespace = {}
self.defaultLogLevel = defaultLogLevel
self.clearLogLevels()
def logLevelForNamespace(self, namespace):
"""
Determine an appropriate log level for the given namespace.
This respects dots in namespaces; for example, if you have previously
invoked C{setLogLevelForNamespace("mypackage", LogLevel.debug)}, then
C{logLevelForNamespace("mypackage.subpackage")} will return
C{LogLevel.debug}.
@param namespace: A logging namespace, or L{None} for the default
namespace.
@type namespace: L{str} (native string)
@return: The log level for the specified namespace.
@rtype: L{LogLevel}
"""
if not namespace:
return self._logLevelsByNamespace[None]
if namespace in self._logLevelsByNamespace:
return self._logLevelsByNamespace[namespace]
segments = namespace.split(".")
index = len(segments) - 1
while index > 0:
namespace = ".".join(segments[:index])
if namespace in self._logLevelsByNamespace:
return self._logLevelsByNamespace[namespace]
index -= 1
return self._logLevelsByNamespace[None]
def setLogLevelForNamespace(self, namespace, level):
"""
Sets the log level for a logging namespace.
@param namespace: A logging namespace.
@type namespace: L{str} (native string)
@param level: The log level for the given namespace.
@type level: L{LogLevel}
"""
if level not in LogLevel.iterconstants():
raise InvalidLogLevelError(level)
if namespace:
self._logLevelsByNamespace[namespace] = level
else:
self._logLevelsByNamespace[None] = level
def clearLogLevels(self):
"""
Clears all log levels to the default.
"""
self._logLevelsByNamespace.clear()
self._logLevelsByNamespace[None] = self.defaultLogLevel
def __call__(self, event):
eventLevel = event.get("log_level", None)
namespace = event.get("log_namespace", None)
namespaceLevel = self.logLevelForNamespace(namespace)
if (
eventLevel is None or
namespace is None or
LogLevel._priorityForLevel(eventLevel) <
LogLevel._priorityForLevel(namespaceLevel)
):
return PredicateResult.no
return PredicateResult.maybe
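A sketch of the filtering machinery above: a LogLevelFilterPredicate drops events below the configured level before they reach the wrapped observer (the events are hand-built dicts for illustration)::

    # Sketch only.
    import sys
    from twisted.logger import (
        FilteringLogObserver, LogLevel, LogLevelFilterPredicate,
        textFileLogObserver,
    )

    predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn)
    observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])

    observer({"log_level": LogLevel.info, "log_namespace": "example",
              "log_format": u"dropped", "log_time": 0})     # below warn: dropped
    observer({"log_level": LogLevel.error, "log_namespace": "example",
              "log_format": u"forwarded", "log_time": 0})   # error: forwarded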

View file

@@ -0,0 +1,178 @@
# -*- test-case-name: twisted.logger.test.test_flatten -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Code related to "flattening" events; that is, extracting a description of all
relevant fields from the format string and persisting them for later
examination.
"""
from string import Formatter
from collections import defaultdict
from twisted.python.compat import unicode
aFormatter = Formatter()
class KeyFlattener(object):
"""
A L{KeyFlattener} computes keys for the things within curly braces in
PEP-3101-style format strings as parsed by L{string.Formatter.parse}.
"""
def __init__(self):
"""
Initialize a L{KeyFlattener}.
"""
self.keys = defaultdict(lambda: 0)
def flatKey(self, fieldName, formatSpec, conversion):
"""
Compute a string key for a given field/format/conversion.
@param fieldName: A format field name.
@type fieldName: L{str}
@param formatSpec: A format spec.
@type formatSpec: L{str}
@param conversion: A format field conversion type.
@type conversion: L{str}
@return: A key specific to the given field, format and conversion, as
well as the occurrence of that combination within this
L{KeyFlattener}'s lifetime.
@rtype: L{str}
"""
result = (
"{fieldName}!{conversion}:{formatSpec}"
.format(
fieldName=fieldName,
formatSpec=(formatSpec or ""),
conversion=(conversion or ""),
)
)
self.keys[result] += 1
n = self.keys[result]
if n != 1:
result += "/" + str(self.keys[result])
return result
def flattenEvent(event):
"""
Flatten the given event by pre-associating format fields with specific
objects and callable results in a L{dict} put into the C{"log_flattened"}
key in the event.
@param event: A logging event.
@type event: L{dict}
"""
if event.get("log_format", None) is None:
return
if "log_flattened" in event:
fields = event["log_flattened"]
else:
fields = {}
keyFlattener = KeyFlattener()
for (literalText, fieldName, formatSpec, conversion) in (
aFormatter.parse(event["log_format"])
):
if fieldName is None:
continue
if conversion != "r":
conversion = "s"
flattenedKey = keyFlattener.flatKey(fieldName, formatSpec, conversion)
structuredKey = keyFlattener.flatKey(fieldName, formatSpec, "")
if flattenedKey in fields:
# We've already seen and handled this key
continue
if fieldName.endswith(u"()"):
fieldName = fieldName[:-2]
callit = True
else:
callit = False
field = aFormatter.get_field(fieldName, (), event)
fieldValue = field[0]
if conversion == "r":
conversionFunction = repr
else: # Above: if conversion is not "r", it's "s"
conversionFunction = unicode
if callit:
fieldValue = fieldValue()
flattenedValue = conversionFunction(fieldValue)
fields[flattenedKey] = flattenedValue
fields[structuredKey] = fieldValue
if fields:
event["log_flattened"] = fields
def extractField(field, event):
"""
Extract a given format field from the given event.
@param field: A string describing a format field or log key. This is the
text that would normally fall between a pair of curly braces in a
format string: for example, C{"key[2].attribute"}. If a conversion is
specified (the thing after the C{"!"} character in a format field) then
the result will always be L{unicode}.
@type field: L{str} (native string)
@param event: A log event.
@type event: L{dict}
@return: A value extracted from the field.
@rtype: L{object}
@raise KeyError: if the field is not found in the given event.
"""
keyFlattener = KeyFlattener()
[[literalText, fieldName, formatSpec, conversion]] = aFormatter.parse(
"{" + field + "}"
)
key = keyFlattener.flatKey(fieldName, formatSpec, conversion)
if "log_flattened" not in event:
flattenEvent(event)
return event["log_flattened"][key]
def flatFormat(event):
"""
Format an event which has been flattened with L{flattenEvent}.
@param event: A logging event.
@type event: L{dict}
@return: A formatted string.
@rtype: L{unicode}
"""
fieldValues = event["log_flattened"]
s = []
keyFlattener = KeyFlattener()
formatFields = aFormatter.parse(event["log_format"])
for literalText, fieldName, formatSpec, conversion in formatFields:
s.append(literalText)
if fieldName is not None:
key = keyFlattener.flatKey(
fieldName, formatSpec, conversion or "s")
s.append(unicode(fieldValues[key]))
return u"".join(s)
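A sketch of the flattening helpers above: flattenEvent() eagerly captures the values a format string refers to, and extractField() looks one of them back up by its format-field text. The Connection class is illustrative, not part of Twisted::

    # Sketch only.
    from twisted.logger import extractField
    from twisted.logger._flatten import flattenEvent

    class Connection(object):
        peer = "10.0.0.1"

    event = {"log_format": u"lost connection to {conn.peer}",
             "conn": Connection()}
    flattenEvent(event)                      # adds the "log_flattened" key
    print(extractField("conn.peer", event))  # prints: 10.0.0.1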

View file

@@ -0,0 +1,421 @@
# -*- test-case-name: twisted.logger.test.test_format -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tools for formatting logging events.
"""
from datetime import datetime as DateTime
from twisted.python.compat import unicode
from twisted.python.failure import Failure
from twisted.python.reflect import safe_repr
from twisted.python._tzhelper import FixedOffsetTimeZone
from ._flatten import flatFormat, aFormatter
timeFormatRFC3339 = "%Y-%m-%dT%H:%M:%S%z"
def formatEvent(event):
"""
Formats an event as a L{unicode}, using the format in
C{event["log_format"]}.
This implementation should never raise an exception; if the formatting
cannot be done, the returned string will describe the event generically so
that a useful message is emitted regardless.
@param event: A logging event.
@type event: L{dict}
@return: A formatted string.
@rtype: L{unicode}
"""
return eventAsText(
event,
includeTraceback=False,
includeTimestamp=False,
includeSystem=False,
)
def formatUnformattableEvent(event, error):
"""
Formats an event as a L{unicode} that describes the event generically and a
formatting error.
@param event: A logging event.
@type event: L{dict}
@param error: The formatting error.
@type error: L{Exception}
@return: A formatted string.
@rtype: L{unicode}
"""
try:
return (
u"Unable to format event {event!r}: {error}"
.format(event=event, error=error)
)
except BaseException:
# Yikes, something really nasty happened.
#
# Try to recover as much formattable data as possible; hopefully at
# least the namespace is sane, which will help you find the offending
# logger.
failure = Failure()
text = u", ".join(
u" = ".join((safe_repr(key), safe_repr(value)))
for key, value in event.items()
)
return (
u"MESSAGE LOST: unformattable object logged: {error}\n"
u"Recoverable data: {text}\n"
u"Exception during formatting:\n{failure}"
.format(error=safe_repr(error), failure=failure, text=text)
)
def formatTime(when, timeFormat=timeFormatRFC3339, default=u"-"):
"""
Format a timestamp as text.
Example::
>>> from time import time
>>> from twisted.logger import formatTime
>>>
>>> t = time()
>>> formatTime(t)
u'2013-10-22T14:19:11-0700'
>>> formatTime(t, timeFormat="%Y/%W") # Year and week number
u'2013/42'
>>>
@param when: A timestamp.
@type when: L{float}
@param timeFormat: A time format.
@type timeFormat: L{unicode} or L{None}
@param default: Text to return if C{when} or C{timeFormat} is L{None}.
@type default: L{unicode}
@return: A formatted time.
@rtype: L{unicode}
"""
if (timeFormat is None or when is None):
return default
else:
tz = FixedOffsetTimeZone.fromLocalTimeStamp(when)
datetime = DateTime.fromtimestamp(when, tz)
return unicode(datetime.strftime(timeFormat))
def formatEventAsClassicLogText(event, formatTime=formatTime):
"""
Format an event as a line of human-readable text for, e.g. traditional log
file output.
The output format is C{u"{timeStamp} [{system}] {event}\\n"}, where:
- C{timeStamp} is computed by calling the given C{formatTime} callable
on the event's C{"log_time"} value
- C{system} is the event's C{"log_system"} value, if set, otherwise,
the C{"log_namespace"} and C{"log_level"}, joined by a C{u"#"}. Each
defaults to C{u"-"} if not set.
- C{event} is the event, as formatted by L{formatEvent}.
Example::
>>> from __future__ import print_function
>>> from time import time
>>> from twisted.logger import formatEventAsClassicLogText
>>> from twisted.logger import LogLevel
>>>
>>> formatEventAsClassicLogText(dict()) # No format, returns None
>>> formatEventAsClassicLogText(dict(log_format=u"Hello!"))
u'- [-#-] Hello!\\n'
>>> formatEventAsClassicLogText(dict(
... log_format=u"Hello!",
... log_time=time(),
... log_namespace="my_namespace",
... log_level=LogLevel.info,
... ))
u'2013-10-22T17:30:02-0700 [my_namespace#info] Hello!\\n'
>>> formatEventAsClassicLogText(dict(
... log_format=u"Hello!",
... log_time=time(),
... log_system="my_system",
... ))
u'2013-11-11T17:22:06-0800 [my_system] Hello!\\n'
>>>
@param event: an event.
@type event: L{dict}
@param formatTime: A time formatter
@type formatTime: L{callable} that takes an C{event} argument and returns
a L{unicode}
@return: A formatted event, or L{None} if no output is appropriate.
@rtype: L{unicode} or L{None}
"""
eventText = eventAsText(event, formatTime=formatTime)
if not eventText:
return None
eventText = eventText.replace(u"\n", u"\n\t")
return eventText + u"\n"
class CallMapping(object):
"""
Read-only mapping that turns a C{()}-suffix in key names into an invocation
of the key rather than a lookup of the key.
Implementation support for L{formatWithCall}.
"""
def __init__(self, submapping):
"""
@param submapping: Another read-only mapping which will be used to look
up items.
"""
self._submapping = submapping
def __getitem__(self, key):
"""
Look up an item in the submapping for this L{CallMapping}, calling it
if C{key} ends with C{"()"}.
"""
callit = key.endswith(u"()")
realKey = key[:-2] if callit else key
value = self._submapping[realKey]
if callit:
value = value()
return value
def formatWithCall(formatString, mapping):
"""
Format a string like L{unicode.format}, but:
- taking only a name mapping; no positional arguments
- with the additional syntax that an empty set of parentheses
correspond to a formatting item that should be called, and its result
C{str}'d, rather than calling C{str} on the element directly as
normal.
For example::
>>> formatWithCall("{string}, {function()}.",
... dict(string="just a string",
... function=lambda: "a function"))
'just a string, a function.'
@param formatString: A PEP-3101 format string.
@type formatString: L{unicode}
@param mapping: A L{dict}-like object to format.
@return: The string with formatted values interpolated.
@rtype: L{unicode}
"""
return unicode(
aFormatter.vformat(formatString, (), CallMapping(mapping))
)
def _formatEvent(event):
"""
Formats an event as a L{unicode}, using the format in
C{event["log_format"]}.
This implementation should never raise an exception; if the formatting
cannot be done, the returned string will describe the event generically so
that a useful message is emitted regardless.
@param event: A logging event.
@type event: L{dict}
@return: A formatted string.
@rtype: L{unicode}
"""
try:
if "log_flattened" in event:
return flatFormat(event)
format = event.get("log_format", None)
if format is None:
return u""
# Make sure format is unicode.
if isinstance(format, bytes):
# If we get bytes, assume it's UTF-8 bytes
format = format.decode("utf-8")
elif not isinstance(format, unicode):
raise TypeError(
"Log format must be unicode or bytes, not {0!r}".format(format)
)
return formatWithCall(format, event)
except BaseException as e:
return formatUnformattableEvent(event, e)
def _formatTraceback(failure):
"""
Format a failure traceback, assuming UTF-8 and using a replacement
strategy for errors. Every effort is made to provide a usable
traceback, but should that not be possible, a message and the
captured exception are logged.
@param failure: The failure to retrieve a traceback from.
@type failure: L{twisted.python.failure.Failure}
@return: The formatted traceback.
@rtype: L{unicode}
"""
try:
traceback = failure.getTraceback()
if isinstance(traceback, bytes):
traceback = traceback.decode('utf-8', errors='replace')
except BaseException as e:
traceback = (
u"(UNABLE TO OBTAIN TRACEBACK FROM EVENT):" + unicode(e)
)
return traceback
def _formatSystem(event):
"""
Format the system specified in the event in the "log_system" key if set,
otherwise the C{"log_namespace"} and C{"log_level"}, joined by a C{u"#"}.
Each defaults to C{u"-"} if not set. If formatting fails completely,
"UNFORMATTABLE" is returned.
@param event: The event containing the system specification.
@type event: L{dict}
@return: A formatted string representing the "log_system" key.
@rtype: L{unicode}
"""
system = event.get("log_system", None)
if system is None:
level = event.get("log_level", None)
if level is None:
levelName = u"-"
else:
levelName = level.name
system = u"{namespace}#{level}".format(
namespace=event.get("log_namespace", u"-"),
level=levelName,
)
else:
try:
system = unicode(system)
except Exception:
system = u"UNFORMATTABLE"
return system
def eventAsText(
event,
includeTraceback=True,
includeTimestamp=True,
includeSystem=True,
formatTime=formatTime,
):
r"""
Format an event as a unicode string. Optionally, attach timestamp,
traceback, and system information.
The full output format is:
C{u"{timeStamp} [{system}] {event}\n{traceback}\n"} where:
- C{timeStamp} is the event's C{"log_time"} value formatted with
the provided C{formatTime} callable.
- C{system} is the event's C{"log_system"} value, if set, otherwise,
the C{"log_namespace"} and C{"log_level"}, joined by a C{u"#"}. Each
defaults to C{u"-"} if not set.
- C{event} is the event, as formatted by L{formatEvent}.
- C{traceback} is the traceback if the event contains a
C{"log_failure"} key. In the event the original traceback cannot
be formatted, a message indicating the failure will be substituted.
If the event cannot be formatted, and no traceback exists, an empty string
is returned, even if includeSystem or includeTimestamp are true.
@param event: A logging event.
@type event: L{dict}
@param includeTraceback: If true and a C{"log_failure"} key exists, append
a traceback.
@type includeTraceback: L{bool}
@param includeTimestamp: If true include a formatted timestamp before the
event.
@type includeTimestamp: L{bool}
@param includeSystem: If true, include the event's C{"log_system"} value.
@type includeSystem: L{bool}
@param formatTime: A time formatter
@type formatTime: L{callable} that takes an C{event} argument and returns
a L{unicode}
@return: A formatted string with specified options.
@rtype: L{unicode}
@since: Twisted 18.9.0
"""
eventText = _formatEvent(event)
if includeTraceback and 'log_failure' in event:
f = event['log_failure']
traceback = _formatTraceback(f)
eventText = u"\n".join((eventText, traceback))
if not eventText:
return eventText
timeStamp = u""
if includeTimestamp:
timeStamp = u"".join([formatTime(event.get("log_time", None)), " "])
system = u""
if includeSystem:
system = u"".join([
u"[",
_formatSystem(event),
u"]",
u" "
])
return u"{timeStamp}{system}{eventText}".format(
timeStamp=timeStamp,
system=system,
eventText=eventText,
)
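A sketch of the top-level formatting entry point defined above, with a hand-built event dict for illustration::

    # Sketch only; the namespace, message and name are illustrative.
    from time import time
    from twisted.logger import LogLevel, eventAsText

    event = {
        "log_format": u"user {name} logged in",
        "log_level": LogLevel.info,
        "log_namespace": "example.auth",
        "log_time": time(),
        "name": "alice",
    }
    print(eventAsText(event))
    # e.g. "2022-01-02T21:50:48+0100 [example.auth#info] user alice logged in"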

View file

@@ -0,0 +1,240 @@
# -*- test-case-name: twisted.logger.test.test_global -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module includes process-global state associated with the logging system,
and implementation of logic for managing that global state.
"""
import sys
import warnings
from twisted.python.compat import currentframe
from twisted.python.reflect import qual
from ._buffer import LimitedHistoryLogObserver
from ._observer import LogPublisher
from ._filter import FilteringLogObserver, LogLevelFilterPredicate
from ._logger import Logger
from ._format import eventAsText
from ._levels import LogLevel
from ._io import LoggingFile
from ._file import FileLogObserver
MORE_THAN_ONCE_WARNING = (
"Warning: primary log target selected twice at <{fileNow}:{lineNow}> - "
"previously selected at <{fileThen}:{lineThen}>. Remove one of the calls "
"to beginLoggingTo."
)
class LogBeginner(object):
"""
A L{LogBeginner} holds state related to logging before logging has begun,
and begins logging when told to do so. Logging "begins" when someone has
selected a set of observers, like, for example, a L{FileLogObserver} that
writes to a file on disk, or to standard output.
Applications will not typically need to instantiate this class, except
those which intend to initialize the global logging system themselves,
which may wish to instantiate this for testing. The global instance for
the current process is exposed as
L{twisted.logger.globalLogBeginner}.
Before logging has begun, a L{LogBeginner} will:
1. Log any critical messages (e.g.: unhandled exceptions) to the given
file-like object.
2. Save (a limited number of) log events in a
L{LimitedHistoryLogObserver}.
@ivar _initialBuffer: A buffer of messages logged before logging began.
@type _initialBuffer: L{LimitedHistoryLogObserver}
@ivar _publisher: The log publisher passed in to L{LogBeginner}'s
constructor.
@type _publisher: L{LogPublisher}
@ivar _log: The logger used to log messages about the operation of the
L{LogBeginner} itself.
@type _log: L{Logger}
@ivar _temporaryObserver: If not L{None}, an L{ILogObserver} that observes
events on C{_publisher} for this L{LogBeginner}.
@type _temporaryObserver: L{ILogObserver} or L{None}
@ivar _stdio: An object with C{stderr} and C{stdout} attributes (like the
L{sys} module) which will be replaced when redirecting standard I/O.
@type _stdio: L{object}
@cvar _DEFAULT_BUFFER_SIZE: The default size for the initial log events
buffer.
@type _DEFAULT_BUFFER_SIZE: L{int}
"""
_DEFAULT_BUFFER_SIZE = 200
def __init__(
self, publisher, errorStream, stdio, warningsModule,
initialBufferSize=None,
):
"""
Initialize this L{LogBeginner}.
@param initialBufferSize: The size of the event buffer into which
events are collected until C{beginLoggingTo} is called. Or
C{None} to use the default size.
@type initialBufferSize: L{int} or L{types.NoneType}
"""
if initialBufferSize is None:
initialBufferSize = self._DEFAULT_BUFFER_SIZE
self._initialBuffer = LimitedHistoryLogObserver(size=initialBufferSize)
self._publisher = publisher
self._log = Logger(observer=publisher)
self._stdio = stdio
self._warningsModule = warningsModule
self._temporaryObserver = LogPublisher(
self._initialBuffer,
FilteringLogObserver(
FileLogObserver(
errorStream,
lambda event: eventAsText(
event,
includeTimestamp=False,
includeSystem=False,
) + '\n'
),
[LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)]
)
)
publisher.addObserver(self._temporaryObserver)
self._oldshowwarning = warningsModule.showwarning
def beginLoggingTo(
self, observers, discardBuffer=False, redirectStandardIO=True
):
"""
Begin logging to the given set of observers. This will:
1. Add all the observers given in C{observers} to the
L{LogPublisher} associated with this L{LogBeginner}.
2. Optionally re-direct standard output and standard error streams
to the logging system.
3. Re-play any messages that were previously logged to that
publisher to the new observers, if C{discardBuffer} is not set.
4. Stop logging critical errors from the L{LogPublisher} as strings
to the C{errorStream} associated with this L{LogBeginner}, and
allow them to be logged normally.
5. Re-direct warnings from the L{warnings} module associated with
this L{LogBeginner} to log messages.
@note: Since a L{LogBeginner} is designed to encapsulate the transition
between process-startup and log-system-configuration, this method
is intended to be invoked I{once}.
@param observers: The observers to register.
@type observers: iterable of L{ILogObserver}s
@param discardBuffer: Whether to discard the buffer and not re-play it
to the added observers. (This argument is provided mainly for
compatibility with legacy concerns.)
@type discardBuffer: L{bool}
@param redirectStandardIO: If true, redirect standard output and
standard error to the observers.
@type redirectStandardIO: L{bool}
"""
caller = currentframe(1)
filename, lineno = caller.f_code.co_filename, caller.f_lineno
for observer in observers:
self._publisher.addObserver(observer)
if self._temporaryObserver is not None:
self._publisher.removeObserver(self._temporaryObserver)
if not discardBuffer:
self._initialBuffer.replayTo(self._publisher)
self._temporaryObserver = None
self._warningsModule.showwarning = self.showwarning
else:
previousFile, previousLine = self._previousBegin
self._log.warn(
MORE_THAN_ONCE_WARNING,
fileNow=filename, lineNow=lineno,
fileThen=previousFile, lineThen=previousLine,
)
self._previousBegin = filename, lineno
if redirectStandardIO:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
else:
streams = []
for (stream, level) in streams:
oldStream = getattr(self._stdio, stream)
loggingFile = LoggingFile(
logger=Logger(namespace=stream, observer=self._publisher),
level=level,
encoding=getattr(oldStream, "encoding", None),
)
setattr(self._stdio, stream, loggingFile)
def showwarning(
self, message, category, filename, lineno, file=None, line=None
):
"""
Twisted-enabled wrapper around L{warnings.showwarning}.
If C{file} is L{None}, the default behaviour is to emit the warning to
the log system, otherwise the original L{warnings.showwarning} Python
function is called.
@param message: A warning message to emit.
@type message: L{str}
@param category: A warning category to associate with C{message}.
@type category: L{warnings.Warning}
@param filename: A file name for the source code file issuing the
warning.
@type filename: L{str}
@param lineno: A line number in the source file where the warning was
issued.
@type lineno: L{int}
@param file: A file to write the warning message to. If L{None},
write to L{sys.stderr}.
@type file: file-like object
@param line: A line of source code to include with the warning message.
If L{None}, attempt to read the line from C{filename} and
C{lineno}.
@type line: L{str}
"""
if file is None:
self._log.warn(
"{filename}:{lineno}: {category}: {warning}",
warning=message, category=qual(category),
filename=filename, lineno=lineno,
)
else:
self._oldshowwarning(
message, category, filename, lineno, file, line
)
globalLogPublisher = LogPublisher()
globalLogBeginner = LogBeginner(globalLogPublisher, sys.stderr, sys, warnings)
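A sketch of typical application start-up against the global instances created above; passing redirectStandardIO=False keeps print() output on the real stdout instead of routing it into the log::

    # Sketch only.
    import sys
    from twisted.logger import globalLogBeginner, textFileLogObserver

    globalLogBeginner.beginLoggingTo(
        [textFileLogObserver(sys.stdout)],
        redirectStandardIO=False,
    )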

View file

@@ -0,0 +1,202 @@
# -*- test-case-name: twisted.logger.test.test_io -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
File-like object that logs.
"""
import sys
from ._levels import LogLevel
class LoggingFile(object):
"""
File-like object that turns C{write()} calls into logging events.
Note that because event formats are C{unicode}, C{bytes} received via
C{write()} are converted to C{unicode}, which is the opposite of what
C{file} does.
@ivar softspace: File-like L{'softspace' attribute <file.softspace>}; zero
or one.
@type softspace: L{int}
"""
softspace = 0
def __init__(self, logger, level=LogLevel.info, encoding=None):
"""
@param logger: The logger to send events to.
@type logger: L{Logger}
@param level: The log level to emit events with.
@param encoding: The encoding to expect when receiving bytes via
C{write()}. If L{None}, use C{sys.getdefaultencoding()}.
@type encoding: L{str}
"""
self.level = level
self.log = logger
if encoding is None:
self._encoding = sys.getdefaultencoding()
else:
self._encoding = encoding
self._buffer = ""
self._closed = False
@property
def closed(self):
"""
Read-only property. Is the file closed?
@return: true if closed, otherwise false.
@rtype: L{bool}
"""
return self._closed
@property
def encoding(self):
"""
Read-only property. File encoding.
@return: an encoding.
@rtype: L{str}
"""
return self._encoding
@property
def mode(self):
"""
Read-only property. File mode.
@return: "w"
@rtype: L{str}
"""
return "w"
@property
def newlines(self):
"""
Read-only property. Types of newlines encountered.
@return: L{None}
@rtype: L{None}
"""
return None
@property
def name(self):
"""
The name of this file; a repr-style string giving information about its
namespace.
@return: A file name.
@rtype: L{str}
"""
return (
"<{0} {1}#{2}>".format(
self.__class__.__name__,
self.log.namespace,
self.level.name,
)
)
def close(self):
"""
Close this file so it can no longer be written to.
"""
self._closed = True
def flush(self):
"""
No-op; this file does not buffer.
"""
pass
def fileno(self):
"""
Returns an invalid file descriptor, since this is not backed by an FD.
@return: C{-1}
@rtype: L{int}
"""
return -1
def isatty(self):
"""
A L{LoggingFile} is not a TTY.
@return: C{False}
@rtype: L{bool}
"""
return False
def write(self, string):
"""
Log the given message.
@param string: Data to write.
@type string: L{bytes} in this file's preferred encoding or L{unicode}
"""
if self._closed:
raise ValueError("I/O operation on closed file")
if isinstance(string, bytes):
string = string.decode(self._encoding)
lines = (self._buffer + string).split("\n")
self._buffer = lines[-1]
lines = lines[0:-1]
for line in lines:
self.log.emit(self.level, format=u"{log_io}", log_io=line)
def writelines(self, lines):
"""
Log each of the given lines as a separate message.
@param lines: Data to write.
@type lines: iterable of L{unicode} or L{bytes} in this file's
declared encoding
"""
for line in lines:
self.write(line)
def _unsupported(self, *args):
"""
Template for unsupported operations.
@param args: Arguments.
@type args: tuple of L{object}
"""
raise IOError("unsupported operation")
read = _unsupported
next = _unsupported
readline = _unsupported
readlines = _unsupported
xreadlines = _unsupported
seek = _unsupported
tell = _unsupported
truncate = _unsupported
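A sketch of the file-like wrapper above: each completed line written to it becomes one log event at the chosen level (the written text is illustrative)::

    # Sketch only.
    from twisted.logger import Logger, LoggingFile, LogLevel

    output = LoggingFile(logger=Logger(namespace="subprocess"),
                         level=LogLevel.warn)
    output.write("first line\nsecond li")   # emits one event ("first line")
    output.write("ne\n")                    # completes and emits "second line"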

View file

@@ -0,0 +1,355 @@
# -*- test-case-name: twisted.logger.test.test_json -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tools for saving and loading log events in a structured format.
"""
import types
from constantly import NamedConstant
from json import dumps, loads
from uuid import UUID
from ._flatten import flattenEvent
from ._file import FileLogObserver
from ._levels import LogLevel
from ._logger import Logger
from twisted.python.compat import unicode, _PY3
from twisted.python.failure import Failure
log = Logger()
def failureAsJSON(failure):
"""
Convert a failure to a JSON-serializable data structure.
@param failure: A failure to serialize.
@type failure: L{Failure}
@return: a mapping of strings to ... stuff, mostly reminiscent of
L{Failure.__getstate__}
@rtype: L{dict}
"""
return dict(
failure.__getstate__(),
type=dict(
__module__=failure.type.__module__,
__name__=failure.type.__name__,
)
)
def asBytes(obj):
"""
On Python 2, we really need native strings in a variety of places;
attribute names will sort of work in a __dict__, but they're subtly wrong;
however, printing tracebacks relies on I/O to containers that only support
bytes. This function converts _all_ native strings within a
JSON-deserialized object to bytes.
@param obj: An object to convert to bytes.
@type obj: L{object}
@return: A string of UTF-8 bytes.
@rtype: L{bytes}
"""
if isinstance(obj, list):
return map(asBytes, obj)
elif isinstance(obj, dict):
return dict((asBytes(k), asBytes(v)) for k, v in obj.items())
elif isinstance(obj, unicode):
return obj.encode("utf-8")
else:
return obj
def failureFromJSON(failureDict):
"""
Load a L{Failure} from a dictionary deserialized from JSON.
@param failureDict: a JSON-deserialized object like one previously returned
by L{failureAsJSON}.
@type failureDict: L{dict} mapping L{unicode} to attributes
@return: L{Failure}
@rtype: L{Failure}
"""
# InstanceType() is only available in Python 2 and lower.
# __new__ is only available on new-style classes.
newFailure = getattr(Failure, "__new__", None)
if newFailure is None:
f = types.InstanceType(Failure)
else:
f = newFailure(Failure)
if not _PY3:
# Python 2 needs the failure dictionary as purely bytes, not text
failureDict = asBytes(failureDict)
typeInfo = failureDict["type"]
failureDict["type"] = type(typeInfo["__name__"], (), typeInfo)
f.__dict__ = failureDict
return f
classInfo = [
(
lambda level: (
isinstance(level, NamedConstant) and
getattr(LogLevel, level.name, None) is level
),
UUID("02E59486-F24D-46AD-8224-3ACDF2A5732A"),
lambda level: dict(name=level.name),
lambda level: getattr(LogLevel, level["name"], None)
),
(
lambda o: isinstance(o, Failure),
UUID("E76887E2-20ED-49BF-A8F8-BA25CC586F2D"),
failureAsJSON, failureFromJSON
),
]
uuidToLoader = dict([
(uuid, loader) for (predicate, uuid, saver, loader) in classInfo
])
def objectLoadHook(aDict):
"""
Dictionary-to-object-translation hook for certain value types used within
the logging system.
@see: the C{object_hook} parameter to L{json.load}
@param aDict: A dictionary loaded from a JSON object.
@type aDict: L{dict}
@return: C{aDict} itself, or the object represented by C{aDict}
@rtype: L{object}
"""
if "__class_uuid__" in aDict:
return uuidToLoader[UUID(aDict["__class_uuid__"])](aDict)
return aDict
def objectSaveHook(pythonObject):
"""
Object-to-serializable hook for certain value types used within the logging
system.
@see: the C{default} parameter to L{json.dump}
@param pythonObject: Any object.
@type pythonObject: L{object}
@return: If the object is one of the special types the logging system
supports, a specially-formatted dictionary; otherwise, a marker
dictionary indicating that it could not be serialized.
"""
for (predicate, uuid, saver, loader) in classInfo:
if predicate(pythonObject):
result = saver(pythonObject)
result["__class_uuid__"] = str(uuid)
return result
return {"unpersistable": True}
def eventAsJSON(event):
"""
Encode an event as JSON, flattening it if necessary to preserve as much
structure as possible.
Not all structure from the log event will be preserved when it is
serialized.
@param event: A log event dictionary.
@type event: L{dict} with arbitrary keys and values
@return: A string of the serialized JSON; note that this will contain no
newline characters, and may thus safely be stored in a line-delimited
file.
@rtype: L{unicode}
"""
if bytes is str:
kw = dict(default=objectSaveHook, encoding="charmap", skipkeys=True)
else:
def default(unencodable):
"""
Serialize an object not otherwise serializable by L{dumps}.
@param unencodable: An unencodable object.
@return: C{unencodable}, serialized
"""
if isinstance(unencodable, bytes):
return unencodable.decode("charmap")
return objectSaveHook(unencodable)
kw = dict(default=default, skipkeys=True)
flattenEvent(event)
result = dumps(event, **kw)
if not isinstance(result, unicode):
return unicode(result, "utf-8", "replace")
return result
def eventFromJSON(eventText):
"""
Decode a log event from JSON.
@param eventText: The output of a previous call to L{eventAsJSON}
@type eventText: L{unicode}
@return: A reconstructed version of the log event.
@rtype: L{dict}
"""
loaded = loads(eventText, object_hook=objectLoadHook)
return loaded
def jsonFileLogObserver(outFile, recordSeparator=u"\x1e"):
"""
Create a L{FileLogObserver} that emits JSON-serialized events to a
specified (writable) file-like object.
Events are written in the following form::
RS + JSON + NL
C{JSON} is the serialized event, which is JSON text. C{NL} is a newline
(C{u"\\n"}). C{RS} is a record separator. By default, this is a single
RS character (C{u"\\x1e"}), which makes the default output conform to the
IETF draft document "draft-ietf-json-text-sequence-13".
@param outFile: A file-like object. Ideally one should be passed which
accepts L{unicode} data. Otherwise, UTF-8 L{bytes} will be used.
@type outFile: L{io.IOBase}
@param recordSeparator: The record separator to use.
@type recordSeparator: L{unicode}
@return: A file log observer.
@rtype: L{FileLogObserver}
"""
return FileLogObserver(
outFile,
lambda event: u"{0}{1}\n".format(recordSeparator, eventAsJSON(event))
)
def eventsFromJSONLogFile(inFile, recordSeparator=None, bufferSize=4096):
"""
Load events from a file previously saved with L{jsonFileLogObserver}.
Event records that are truncated or otherwise unreadable are ignored.
@param inFile: A (readable) file-like object. Data read from C{inFile}
should be L{unicode} or UTF-8 L{bytes}.
@type inFile: iterable of lines
@param recordSeparator: The expected record separator.
If L{None}, attempt to automatically detect the record separator from
one of C{u"\\x1e"} or C{u""}.
@type recordSeparator: L{unicode}
@param bufferSize: The size of the read buffer used while reading from
C{inFile}.
@type bufferSize: integer
@return: Log events as read from C{inFile}.
@rtype: iterable of L{dict}
"""
def asBytes(s):
if type(s) is bytes:
return s
else:
return s.encode("utf-8")
def eventFromBytearray(record):
try:
text = bytes(record).decode("utf-8")
except UnicodeDecodeError:
log.error(
u"Unable to decode UTF-8 for JSON record: {record!r}",
record=bytes(record)
)
return None
try:
return eventFromJSON(text)
except ValueError:
log.error(
u"Unable to read JSON record: {record!r}",
record=bytes(record)
)
return None
if recordSeparator is None:
first = asBytes(inFile.read(1))
if first == b"\x1e":
# This looks json-text-sequence compliant.
recordSeparator = first
else:
# Default to simpler newline-separated stream, which does not use
# a record separator.
recordSeparator = b""
else:
recordSeparator = asBytes(recordSeparator)
first = b""
if recordSeparator == b"":
recordSeparator = b"\n" # Split on newlines below
eventFromRecord = eventFromBytearray
else:
def eventFromRecord(record):
if record[-1] == ord("\n"):
return eventFromBytearray(record)
else:
log.error(
u"Unable to read truncated JSON record: {record!r}",
record=bytes(record)
)
return None
buffer = bytearray(first)
while True:
newData = inFile.read(bufferSize)
if not newData:
if len(buffer) > 0:
event = eventFromRecord(buffer)
if event is not None:
yield event
break
buffer += asBytes(newData)
records = buffer.split(recordSeparator)
for record in records[:-1]:
if len(record) > 0:
event = eventFromRecord(record)
if event is not None:
yield event
buffer = records[-1]
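A round-trip sketch for the JSON observer and reader defined above; the file path is illustrative::

    # Sketch only; "log.json" is an illustrative path.
    import io
    from twisted.logger import (
        Logger, eventsFromJSONLogFile, jsonFileLogObserver,
    )

    with io.open("log.json", "a", encoding="utf-8") as f:
        log = Logger(namespace="example", observer=jsonFileLogObserver(f))
        log.info("saving {count} records", count=3)

    with io.open("log.json", "r", encoding="utf-8") as f:
        for event in eventsFromJSONLogFile(f):
            print(event["count"])   # 3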

View file

@@ -0,0 +1,154 @@
# -*- test-case-name: twisted.logger.test.test_legacy -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Integration with L{twisted.python.log}.
"""
from zope.interface import implementer
from ._levels import LogLevel
from ._format import formatEvent
from ._observer import ILogObserver
from ._stdlib import fromStdlibLogLevelMapping, StringifiableFromEvent
@implementer(ILogObserver)
class LegacyLogObserverWrapper(object):
"""
L{ILogObserver} that wraps an L{twisted.python.log.ILogObserver}.
Received (new-style) events are modified prior to forwarding to
the legacy observer to ensure compatibility with observers that
expect legacy events.
"""
def __init__(self, legacyObserver):
"""
@param legacyObserver: a legacy observer to which this observer will
forward events.
@type legacyObserver: L{twisted.python.log.ILogObserver}
"""
self.legacyObserver = legacyObserver
def __repr__(self):
return (
"{self.__class__.__name__}({self.legacyObserver})"
.format(self=self)
)
def __call__(self, event):
"""
Forward events to the legacy observer after editing them to
ensure compatibility.
@param event: an event
@type event: L{dict}
"""
# The "message" key is required by textFromEventDict()
if "message" not in event:
event["message"] = ()
if "time" not in event:
event["time"] = event["log_time"]
if "system" not in event:
event["system"] = event.get("log_system", "-")
# Format new style -> old style
if "format" not in event and event.get("log_format", None) is not None:
# Create an object that implements __str__() in order to defer the
# work of formatting until it's needed by a legacy log observer.
event["format"] = "%(log_legacy)s"
event["log_legacy"] = StringifiableFromEvent(event.copy())
# In the old-style system, the 'message' key always holds a tuple
# of messages. If we find the 'message' key here to not be a
# tuple, it has been passed as new-style parameter. We drop it
# here because we render it using the old-style 'format' key,
# which otherwise doesn't get precedence, and the original event
# has been copied above.
if not isinstance(event["message"], tuple):
event["message"] = ()
# From log.failure(): populate the legacy "failure", "isError" and "why" keys
if "log_failure" in event:
if "failure" not in event:
event["failure"] = event["log_failure"]
if "isError" not in event:
event["isError"] = 1
if "why" not in event:
event["why"] = formatEvent(event)
elif "isError" not in event:
if event["log_level"] in (LogLevel.error, LogLevel.critical):
event["isError"] = 1
else:
event["isError"] = 0
self.legacyObserver(event)
def publishToNewObserver(observer, eventDict, textFromEventDict):
"""
Publish an old-style (L{twisted.python.log}) event to a new-style
(L{twisted.logger}) observer.
@note: It's possible that a new-style event was sent to a
L{LegacyLogObserverWrapper}, and may now be getting sent back to a
new-style observer. In this case, it's already a new-style event,
adapted to also look like an old-style event, and we don't need to
tweak it again to be a new-style event, hence the checks for
already-defined new-style keys.
@param observer: A new-style observer to handle this event.
@type observer: L{ILogObserver}
@param eventDict: An L{old-style <twisted.python.log>}, log event.
@type eventDict: L{dict}
@param textFromEventDict: callable that can format an old-style event as a
string. Passed here rather than imported to avoid circular dependency.
@type textFromEventDict: 1-arg L{callable} taking L{dict} returning L{str}
@return: L{None}
"""
if "log_time" not in eventDict:
eventDict["log_time"] = eventDict["time"]
if "log_format" not in eventDict:
text = textFromEventDict(eventDict)
if text is not None:
eventDict["log_text"] = text
eventDict["log_format"] = u"{log_text}"
if "log_level" not in eventDict:
if "logLevel" in eventDict:
try:
level = fromStdlibLogLevelMapping[eventDict["logLevel"]]
except KeyError:
level = None
elif "isError" in eventDict:
if eventDict["isError"]:
level = LogLevel.critical
else:
level = LogLevel.info
else:
level = LogLevel.info
if level is not None:
eventDict["log_level"] = level
if "log_namespace" not in eventDict:
eventDict["log_namespace"] = u"log_legacy"
if "log_system" not in eventDict and "system" in eventDict:
eventDict["log_system"] = eventDict["system"]
observer(eventDict)
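A sketch of the wrapper above: a plain callable standing in for a legacy twisted.python.log observer receives old-style event dicts after adaptation::

    # Sketch only; legacyObserver is an illustrative stand-in.
    from twisted.logger import LegacyLogObserverWrapper, globalLogPublisher

    def legacyObserver(eventDict):
        # Old-style keys such as "system", "message" and "isError" have been
        # filled in by the wrapper before this is called.
        print(eventDict["system"], eventDict["isError"])

    globalLogPublisher.addObserver(LegacyLogObserverWrapper(legacyObserver))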

View file

@@ -0,0 +1,110 @@
# -*- test-case-name: twisted.logger.test.test_levels -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Log levels.
"""
from constantly import NamedConstant, Names
class InvalidLogLevelError(Exception):
"""
Someone tried to use a L{LogLevel} that is unknown to the logging system.
"""
def __init__(self, level):
"""
@param level: A log level.
@type level: L{LogLevel}
"""
super(InvalidLogLevelError, self).__init__(str(level))
self.level = level
class LogLevel(Names):
"""
Constants describing log levels.
@cvar debug: Debugging events: Information of use to a developer of the
software, not generally of interest to someone running the software
unless they are attempting to diagnose a software issue.
@cvar info: Informational events: Routine information about the status of
an application, such as incoming connections, startup of a subsystem,
etc.
@cvar warn: Warning events: Events that may require greater attention than
informational events but are not a systemic failure condition, such as
authorization failures, bad data from a network client, etc. Such
events are of potential interest to system administrators, and should
ideally be phrased in such a way, or documented, so as to indicate an
action that an administrator might take to mitigate the warning.
@cvar error: Error conditions: Events indicating a systemic failure, such
as programming errors in the form of unhandled exceptions, loss of
connectivity to an external system without which no useful work can
proceed, such as a database or API endpoint, or resource exhaustion.
Similarly to warnings, errors that are related to operational
parameters may be actionable to system administrators and should
provide references to resources which an administrator might use to
resolve them.
@cvar critical: Critical failures: Errors indicating systemic failure (i.e.
service outage), data corruption, imminent data loss, etc. which must
be handled immediately. This includes errors unanticipated by the
software, such as unhandled exceptions, wherein the cause and
consequences are unknown.
"""
debug = NamedConstant()
info = NamedConstant()
warn = NamedConstant()
error = NamedConstant()
critical = NamedConstant()
@classmethod
def levelWithName(cls, name):
"""
Get the log level with the given name.
@param name: The name of a log level.
@type name: L{str} (native string)
@return: The L{LogLevel} with the specified C{name}.
@rtype: L{LogLevel}
@raise InvalidLogLevelError: if the C{name} does not name a valid log
level.
"""
try:
return cls.lookupByName(name)
except ValueError:
raise InvalidLogLevelError(name)
@classmethod
def _priorityForLevel(cls, level):
"""
We want log levels to have defined ordering - the order of definition -
but they aren't value constants (the only value is the name). This is
arguably a bug in Twisted, so this is just a workaround for U{until
this is fixed in some way
<https://twistedmatrix.com/trac/ticket/6523>}.
@param level: A log level.
@type level: L{LogLevel}
@return: A numeric index indicating priority (lower is higher level).
@rtype: L{int}
"""
return cls._levelPriorities[level]
LogLevel._levelPriorities = dict(
(level, index) for (index, level) in
(enumerate(LogLevel.iterconstants()))
)
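A brief sketch of the level constants defined above::

    # Sketch only.
    from twisted.logger import LogLevel

    level = LogLevel.levelWithName("warn")
    assert level is LogLevel.warn

    for constant in LogLevel.iterconstants():
        print(constant.name)   # debug, info, warn, error, critical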

View file

@@ -0,0 +1,275 @@
# -*- test-case-name: twisted.logger.test.test_logger -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Logger class.
"""
from time import time
from twisted.python.compat import currentframe
from twisted.python.failure import Failure
from ._levels import InvalidLogLevelError, LogLevel
class Logger(object):
"""
A L{Logger} emits log messages to an observer. You should instantiate it
as a class or module attribute, as documented in L{this module's
documentation <twisted.logger>}.
@type namespace: L{str}
@ivar namespace: the namespace for this logger
@type source: L{object}
@ivar source: The object which is emitting events via this logger
@type observer: L{ILogObserver}
@ivar observer: The observer that this logger will send events to.
"""
@staticmethod
def _namespaceFromCallingContext():
"""
Derive a namespace from the module containing the caller's caller.
@return: the fully qualified python name of a module.
@rtype: L{str} (native string)
"""
try:
return currentframe(2).f_globals["__name__"]
except KeyError:
return "<unknown>"
def __init__(self, namespace=None, source=None, observer=None):
"""
@param namespace: The namespace for this logger. Uses a dotted
notation, as used by python modules. If L{None}, the name of the
module of the caller is used.
@type namespace: L{str} (native string)
@param source: The object which is emitting events via this
logger; this is automatically set on instances of a class
if this L{Logger} is an attribute of that class.
@type source: L{object}
@param observer: The observer that this logger will send events to.
If L{None}, use the L{global log publisher <globalLogPublisher>}.
@type observer: L{ILogObserver}
"""
if namespace is None:
namespace = self._namespaceFromCallingContext()
self.namespace = namespace
self.source = source
if observer is None:
from ._global import globalLogPublisher
self.observer = globalLogPublisher
else:
self.observer = observer
def __get__(self, oself, type=None):
"""
When used as a descriptor, i.e.::
# File: athing.py
class Something(object):
log = Logger()
def hello(self):
self.log.info("Hello")
a L{Logger}'s namespace will be set to the name of the class it is
declared on. In the above example, the namespace would be
C{athing.Something}.
Additionally, its source will be set to the actual object referring to
the L{Logger}. In the above example, C{Something.log.source} would be
C{Something}, and C{Something().log.source} would be an instance of
C{Something}.
"""
if oself is None:
source = type
else:
source = oself
return self.__class__(
".".join([type.__module__, type.__name__]),
source,
observer=self.observer,
)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.namespace)
def emit(self, level, format=None, **kwargs):
"""
Emit a log event to all log observers at the given level.
@param level: a L{LogLevel}
@param format: a message format using new-style (PEP 3101)
formatting. The logging event (which is a L{dict}) is
used to render this format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
if level not in LogLevel.iterconstants():
self.failure(
"Got invalid log level {invalidLevel!r} in {logger}.emit().",
Failure(InvalidLogLevelError(level)),
invalidLevel=level,
logger=self,
)
return
event = kwargs
event.update(
log_logger=self, log_level=level, log_namespace=self.namespace,
log_source=self.source, log_format=format, log_time=time(),
)
if "log_trace" in event:
event["log_trace"].append((self, self.observer))
self.observer(event)
def failure(self, format, failure=None, level=LogLevel.critical, **kwargs):
"""
Log a failure and emit a traceback.
For example::
try:
frob(knob)
except Exception:
log.failure("While frobbing {knob}", knob=knob)
or::
d = deferredFrob(knob)
d.addErrback(lambda f: log.failure("While frobbing {knob}",
f, knob=knob))
This method is generally meant to capture unexpected exceptions in
code; an exception that is caught and handled somehow should be logged,
if appropriate, via L{Logger.error} instead. If some unknown exception
occurs and your code doesn't know how to handle it, as in the above
example, then this method provides a means to describe the failure in
nerd-speak. This is done at L{LogLevel.critical} by default, since no
corrective guidance can be offered to a user/administrator, and the
impact of the condition is unknown.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param failure: a L{Failure} to log. If L{None}, a L{Failure} is
created from the exception in flight.
@param level: a L{LogLevel} to use.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
if failure is None:
failure = Failure()
self.emit(level, format, log_failure=failure, **kwargs)
def debug(self, format=None, **kwargs):
"""
Emit a log event at log level L{LogLevel.debug}.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
self.emit(LogLevel.debug, format, **kwargs)
def info(self, format=None, **kwargs):
"""
Emit a log event at log level L{LogLevel.info}.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
self.emit(LogLevel.info, format, **kwargs)
def warn(self, format=None, **kwargs):
"""
Emit a log event at log level L{LogLevel.warn}.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
self.emit(LogLevel.warn, format, **kwargs)
def error(self, format=None, **kwargs):
"""
Emit a log event at log level L{LogLevel.error}.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
self.emit(LogLevel.error, format, **kwargs)
def critical(self, format=None, **kwargs):
"""
Emit a log event at log level L{LogLevel.critical}.
@param format: a message format using new-style (PEP 3101) formatting.
The logging event (which is a L{dict}) is used to render this
format string.
@param kwargs: additional key/value pairs to include in the event.
Note that values which are later mutated may result in
non-deterministic behavior from observers that schedule work for
later execution.
"""
self.emit(LogLevel.critical, format, **kwargs)
_log = Logger()
_loggerFor = lambda obj: _log.__get__(obj, obj.__class__)
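A short sketch of what an observer receives from C{emit()} and how the descriptor derives a class-qualified namespace (the C{Processor} name is purely illustrative)::

    from twisted.logger import Logger

    events = []

    class Processor(object):
        log = Logger(observer=events.append)

        def work(self):
            self.log.info("processed {count} items", count=3)

    Processor().work()
    event = events[0]
    # emit() merged the caller's kwargs with the reserved keys:
    # event["count"] == 3, event["log_level"] is LogLevel.info,
    # event["log_format"] == "processed {count} items", and
    # event["log_namespace"] ends with ".Processor".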

View file

@@ -0,0 +1,158 @@
# -*- test-case-name: twisted.logger.test.test_observer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Basic log observers.
"""
from zope.interface import Interface, implementer
from twisted.python.failure import Failure
from ._logger import Logger
OBSERVER_DISABLED = (
"Temporarily disabling observer {observer} due to exception: {log_failure}"
)
class ILogObserver(Interface):
"""
An observer which can handle log events.
Unlike most interfaces within Twisted, an L{ILogObserver} I{must be
thread-safe}. Log observers may be called indiscriminately from many
different threads, as any thread may wish to log a message at any time.
"""
def __call__(event):
"""
Log an event.
@type event: C{dict} with (native) C{str} keys.
@param event: A dictionary with arbitrary keys as defined by the
application emitting logging events, as well as keys added by the
logging system. The logging system reserves the right to set any
key beginning with the prefix C{"log_"}; applications should not
use any key so named. Currently, the following keys are used by
the logging system in some way, if they are present (they are all
optional):
- C{"log_format"}: a PEP-3101-style format string which draws
upon the keys in the event as its values, used to format the
event for human consumption.
- C{"log_flattened"}: a dictionary mapping keys derived from
the names and format values used in the C{"log_format"}
string to their values. This is used to preserve some
structured information for use with
L{twisted.logger.extractField}.
- C{"log_trace"}: A L{list} designed to capture information
about which L{LogPublisher}s have observed the event.
- C{"log_level"}: a L{log level
<twisted.logger.LogLevel>} constant, indicating the
importance of and audience for this event.
- C{"log_namespace"}: a namespace for the emitter of the event,
given as a unicode string.
- C{"log_system"}: a string indicating the network event or
method call which resulted in the message being logged.
"""
@implementer(ILogObserver)
class LogPublisher(object):
"""
I{ILogObserver} that fans out events to other observers.
Keeps track of a set of L{ILogObserver} objects and forwards
events to each.
"""
def __init__(self, *observers):
self._observers = list(observers)
self.log = Logger(observer=self)
def addObserver(self, observer):
"""
Registers an observer with this publisher.
@param observer: An L{ILogObserver} to add.
"""
if not callable(observer):
raise TypeError("Observer is not callable: {0!r}".format(observer))
if observer not in self._observers:
self._observers.append(observer)
def removeObserver(self, observer):
"""
Unregisters an observer with this publisher.
@param observer: An L{ILogObserver} to remove.
"""
try:
self._observers.remove(observer)
except ValueError:
pass
def __call__(self, event):
"""
Forward events to contained observers.
"""
if "log_trace" in event:
def trace(observer):
"""
Add tracing information for an observer.
@param observer: an observer being forwarded to
@type observer: L{ILogObserver}
"""
event["log_trace"].append((self, observer))
else:
trace = None
brokenObservers = []
for observer in self._observers:
if trace is not None:
trace(observer)
try:
observer(event)
except Exception:
brokenObservers.append((observer, Failure()))
for brokenObserver, failure in brokenObservers:
errorLogger = self._errorLoggerForObserver(brokenObserver)
errorLogger.failure(
OBSERVER_DISABLED,
failure=failure,
observer=brokenObserver,
)
def _errorLoggerForObserver(self, observer):
"""
Create an error-logger based on this logger, which does not contain the
given bad observer.
@param observer: The observer which previously had an error.
@type observer: L{ILogObserver}
@return: A L{Logger} that publishes to the remaining (non-broken) observers.
@rtype: L{Logger}
"""
errorPublisher = LogPublisher(*[
obs for obs in self._observers
if obs is not observer
])
return Logger(observer=errorPublisher)
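A sketch of the fan-out and error-isolation behaviour described above (the observer names are illustrative)::

    from twisted.logger import LogPublisher

    seen = []

    def good(event):
        seen.append(event)

    def broken(event):
        raise RuntimeError("observer bug")

    publisher = LogPublisher(good, broken)
    publisher({"log_format": "fan-out test"})
    # good() received the original event plus a critical follow-up event
    # describing the failure in broken(); the exception never reached the
    # caller of publisher().
    assert len(seen) == 2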

View file

@@ -0,0 +1,147 @@
# -*- test-case-name: twisted.logger.test.test_stdlib -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Integration with Python standard library logging.
"""
import logging as stdlibLogging
from zope.interface import implementer
from twisted.python.compat import _PY3, currentframe, unicode
from ._levels import LogLevel
from ._format import formatEvent
from ._observer import ILogObserver
# Mappings to Python's logging module
toStdlibLogLevelMapping = {
LogLevel.debug: stdlibLogging.DEBUG,
LogLevel.info: stdlibLogging.INFO,
LogLevel.warn: stdlibLogging.WARNING,
LogLevel.error: stdlibLogging.ERROR,
LogLevel.critical: stdlibLogging.CRITICAL,
}
def _reverseLogLevelMapping():
"""
Reverse the above mapping, adding both the numerical keys used above and
the corresponding string keys also used by python logging.
@return: the reversed mapping
"""
mapping = {}
for logLevel, pyLogLevel in toStdlibLogLevelMapping.items():
mapping[pyLogLevel] = logLevel
mapping[stdlibLogging.getLevelName(pyLogLevel)] = logLevel
return mapping
fromStdlibLogLevelMapping = _reverseLogLevelMapping()
@implementer(ILogObserver)
class STDLibLogObserver(object):
"""
Log observer that writes to the python standard library's C{logging}
module.
@note: Warning: specific logging configurations (example: network) can lead
to this observer blocking. Nothing is done here to prevent that, so be
sure not to configure the standard library logging module to block
when used in conjunction with this module: code within Twisted, such as
twisted.web, assumes that logging does not block.
@cvar defaultStackDepth: This is the default number of frames that it takes
to get from L{STDLibLogObserver} through the logging module, plus one;
in other words, the number of frames if you were to call a
L{STDLibLogObserver} directly. This is useful to use as an offset for
the C{stackDepth} parameter to C{__init__}, to add frames for other
publishers.
"""
defaultStackDepth = 4
def __init__(self, name="twisted", stackDepth=defaultStackDepth):
"""
@param name: logger identifier.
@type name: C{str}
@param stackDepth: The depth of the stack to investigate for caller
metadata.
@type stackDepth: L{int}
"""
self.logger = stdlibLogging.getLogger(name)
self.logger.findCaller = self._findCaller
self.stackDepth = stackDepth
def _findCaller(self, stackInfo=False, stackLevel=1):
"""
Based on the stack depth passed to this L{STDLibLogObserver}, identify
the calling function.
@param stackInfo: Whether or not to construct stack information.
(Currently ignored.)
@type stackInfo: L{bool}
@param stackLevel: The number of stack frames to skip when determining
the caller (currently ignored; use stackDepth on the instance).
@type stackLevel: L{int}
@return: Depending on Python version, either a 3-tuple of (filename,
lineno, name) or a 4-tuple of that plus stack information.
@rtype: L{tuple}
"""
f = currentframe(self.stackDepth)
co = f.f_code
if _PY3:
extra = (None,)
else:
extra = ()
return (co.co_filename, f.f_lineno, co.co_name) + extra
def __call__(self, event):
"""
Format an event and bridge it to Python logging.
"""
level = event.get("log_level", LogLevel.info)
failure = event.get('log_failure')
if failure is None:
excInfo = None
else:
excInfo = (
failure.type, failure.value, failure.getTracebackObject())
stdlibLevel = toStdlibLogLevelMapping.get(level, stdlibLogging.INFO)
self.logger.log(
stdlibLevel, StringifiableFromEvent(event), exc_info=excInfo)
class StringifiableFromEvent(object):
"""
An object that implements C{__str__()} in order to defer the work of
formatting until it's converted into a C{str}.
"""
def __init__(self, event):
"""
@param event: An event.
@type event: L{dict}
"""
self.event = event
def __unicode__(self):
return formatEvent(self.event)
def __bytes__(self):
return unicode(self).encode("utf-8")
if _PY3:
__str__ = __unicode__
else:
__str__ = __bytes__
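A bridging sketch: route events from a L{Logger} into the standard library logging tree via the level mapping above (the handler configuration and message are illustrative)::

    import logging
    import sys

    from twisted.logger import Logger, STDLibLogObserver

    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    log = Logger(namespace="myapp", observer=STDLibLogObserver())
    log.warn("disk usage at {percent}%", percent=90)
    # Emitted on the stdlib "twisted" logger at logging.WARNING; the
    # message text is rendered lazily by StringifiableFromEvent.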

View file

@@ -0,0 +1,48 @@
# -*- test-case-name: twisted.logger.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Logging utilities.
"""
def formatTrace(trace):
"""
Format a trace (that is, the contents of the C{log_trace} key of a log
event) as a visual indication of the message's propagation through various
observers.
@param trace: the contents of the C{log_trace} key from an event.
@type trace: object
@return: A multi-line string with indentation and arrows indicating the
flow of the message through various observers.
@rtype: L{unicode}
"""
def formatWithName(obj):
if hasattr(obj, "name"):
return u"{0} ({1})".format(obj, obj.name)
else:
return u"{0}".format(obj)
result = []
lineage = []
for parent, child in trace:
if not lineage or lineage[-1] is not parent:
if parent in lineage:
while lineage[-1] is not parent:
lineage.pop()
else:
if not lineage:
result.append(u"{0}\n".format(formatWithName(parent)))
lineage.append(parent)
result.append(u" " * len(lineage))
result.append(u"-> {0}\n".format(formatWithName(child)))
return u"".join(result)

View file

@@ -0,0 +1,7 @@
# -*- test-case-name: twisted.logger.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Unit tests for L{twisted.logger}.
"""

View file

@@ -0,0 +1,62 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._buffer}.
"""
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from .._observer import ILogObserver
from .._buffer import LimitedHistoryLogObserver
class LimitedHistoryLogObserverTests(unittest.TestCase):
"""
Tests for L{LimitedHistoryLogObserver}.
"""
def test_interface(self):
"""
L{LimitedHistoryLogObserver} provides L{ILogObserver}.
"""
observer = LimitedHistoryLogObserver(0)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def test_order(self):
"""
L{LimitedHistoryLogObserver} saves history in the order it is received.
"""
size = 4
events = [dict(n=n) for n in range(size//2)]
observer = LimitedHistoryLogObserver(size)
for event in events:
observer(event)
outEvents = []
observer.replayTo(outEvents.append)
self.assertEqual(events, outEvents)
def test_limit(self):
"""
When more events than a L{LimitedHistoryLogObserver}'s maximum size are
buffered, older events will be dropped.
"""
size = 4
events = [dict(n=n) for n in range(size*2)]
observer = LimitedHistoryLogObserver(size)
for event in events:
observer(event)
outEvents = []
observer.replayTo(outEvents.append)
self.assertEqual(events[-size:], outEvents)
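Outside the test harness, the same ring-buffer behaviour can back an on-demand dump of recent events; a sketch (buffer size and messages are illustrative)::

    import sys

    from twisted.logger import (
        LimitedHistoryLogObserver, Logger, textFileLogObserver,
    )

    history = LimitedHistoryLogObserver(2)
    log = Logger(observer=history)
    for n in range(5):
        log.debug("step {n}", n=n)
    # Only the last two events were retained; replay them to a real observer.
    history.replayTo(textFileLogObserver(sys.stdout))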

View file

@@ -0,0 +1,39 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._capture}.
"""
from twisted.logger import Logger, LogLevel
from twisted.trial.unittest import TestCase
from .._capture import capturedLogs
class LogCaptureTests(TestCase):
"""
Tests for L{LogCaptureTests}.
"""
log = Logger()
def test_capture(self):
"""
Events logged within context are captured.
"""
foo = object()
with capturedLogs() as captured:
self.log.debug("Capture this, please", foo=foo)
self.log.info("Capture this too, please", foo=foo)
self.assertTrue(len(captured) == 2)
self.assertEqual(captured[0]["log_format"], "Capture this, please")
self.assertEqual(captured[0]["log_level"], LogLevel.debug)
self.assertEqual(captured[0]["foo"], foo)
self.assertEqual(captured[1]["log_format"], "Capture this too, please")
self.assertEqual(captured[1]["log_level"], LogLevel.info)
self.assertEqual(captured[1]["foo"], foo)

View file

@@ -0,0 +1,199 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._file}.
"""
from io import StringIO
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
from twisted.python.compat import unicode
from .._observer import ILogObserver
from .._file import FileLogObserver
from .._file import textFileLogObserver
class FileLogObserverTests(TestCase):
"""
Tests for L{FileLogObserver}.
"""
def test_interface(self):
"""
L{FileLogObserver} is an L{ILogObserver}.
"""
with StringIO() as fileHandle:
observer = FileLogObserver(fileHandle, lambda e: unicode(e))
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def test_observeWrites(self):
"""
L{FileLogObserver} writes to the given file when it observes events.
"""
with StringIO() as fileHandle:
observer = FileLogObserver(fileHandle, lambda e: unicode(e))
event = dict(x=1)
observer(event)
self.assertEqual(fileHandle.getvalue(), unicode(event))
def _test_observeWrites(self, what, count):
"""
Verify that observer performs an expected number of writes when the
formatter returns a given value.
@param what: the value for the formatter to return.
@type what: L{unicode}
@param count: the expected number of writes.
@type count: L{int}
"""
with DummyFile() as fileHandle:
observer = FileLogObserver(fileHandle, lambda e: what)
event = dict(x=1)
observer(event)
self.assertEqual(fileHandle.writes, count)
def test_observeWritesNone(self):
"""
L{FileLogObserver} does not write to the given file when it observes
events and C{formatEvent} returns L{None}.
"""
self._test_observeWrites(None, 0)
def test_observeWritesEmpty(self):
"""
L{FileLogObserver} does not write to the given file when it observes
events and C{formatEvent} returns C{u""}.
"""
self._test_observeWrites(u"", 0)
def test_observeFlushes(self):
"""
L{FileLogObserver} calls C{flush()} on the output file when it
observes an event.
"""
with DummyFile() as fileHandle:
observer = FileLogObserver(fileHandle, lambda e: unicode(e))
event = dict(x=1)
observer(event)
self.assertEqual(fileHandle.flushes, 1)
class TextFileLogObserverTests(TestCase):
"""
Tests for L{textFileLogObserver}.
"""
def test_returnsFileLogObserver(self):
"""
L{textFileLogObserver} returns a L{FileLogObserver}.
"""
with StringIO() as fileHandle:
observer = textFileLogObserver(fileHandle)
self.assertIsInstance(observer, FileLogObserver)
def test_outFile(self):
"""
Returned L{FileLogObserver} has the correct outFile.
"""
with StringIO() as fileHandle:
observer = textFileLogObserver(fileHandle)
self.assertIs(observer._outFile, fileHandle)
def test_timeFormat(self):
"""
Returned L{FileLogObserver} formats times with the given C{timeFormat}.
"""
with StringIO() as fileHandle:
observer = textFileLogObserver(fileHandle, timeFormat=u"%f")
observer(dict(log_format=u"XYZZY", log_time=112345.6))
self.assertEqual(fileHandle.getvalue(), u"600000 [-#-] XYZZY\n")
def test_observeFailure(self):
"""
If the C{"log_failure"} key exists in an event, the observer appends
the failure's traceback to the output.
"""
with StringIO() as fileHandle:
observer = textFileLogObserver(fileHandle)
try:
1 / 0
except ZeroDivisionError:
failure = Failure()
event = dict(log_failure=failure)
observer(event)
output = fileHandle.getvalue()
self.assertTrue(output.split("\n")[1].startswith("\tTraceback "),
msg=repr(output))
def test_observeFailureThatRaisesInGetTraceback(self):
"""
If the C{"log_failure"} key exists in an event, and contains an object
that raises when you call its C{getTraceback()}, then the observer
appends a message noting the problem, instead of raising.
"""
with StringIO() as fileHandle:
observer = textFileLogObserver(fileHandle)
event = dict(log_failure=object()) # object has no getTraceback()
observer(event)
output = fileHandle.getvalue()
expected = (
"(UNABLE TO OBTAIN TRACEBACK FROM EVENT)"
)
self.assertIn(expected, output)
class DummyFile(object):
"""
File that counts writes and flushes.
"""
def __init__(self):
self.writes = 0
self.flushes = 0
def write(self, data):
"""
Write data.
@param data: data
@type data: L{unicode} or L{bytes}
"""
self.writes += 1
def flush(self):
"""
Flush buffers.
"""
self.flushes += 1
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
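The C{timeFormat} behaviour exercised above, in plain use; a sketch (the strftime string, namespace, and message are illustrative)::

    import sys

    from twisted.logger import Logger, textFileLogObserver

    observer = textFileLogObserver(sys.stdout, timeFormat=u"%H:%M:%S")
    log = Logger(namespace="myapp", observer=observer)
    log.info("listening on port {port}", port=8080)
    # -> e.g. u"11:40:47 [myapp#info] listening on port 8080\n"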

View file

@@ -0,0 +1,411 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._filter}.
"""
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from .._levels import InvalidLogLevelError
from .._levels import LogLevel
from .._observer import ILogObserver
from .._observer import LogPublisher
from .._filter import FilteringLogObserver
from .._filter import PredicateResult
from .._filter import LogLevelFilterPredicate
class FilteringLogObserverTests(unittest.TestCase):
"""
Tests for L{FilteringLogObserver}.
"""
def test_interface(self):
"""
L{FilteringLogObserver} is an L{ILogObserver}.
"""
observer = FilteringLogObserver(lambda e: None, ())
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def filterWith(self, filters, other=False):
"""
Apply a set of pre-defined filters on a known set of events and return
the filtered list of event numbers.
The pre-defined events are four events with a C{count} attribute set to
C{0}, C{1}, C{2}, and C{3}.
@param filters: names of the filters to apply.
Options are:
- C{"twoMinus"} (count <=2),
- C{"twoPlus"} (count >= 2),
- C{"notTwo"} (count != 2),
- C{"no"} (False).
@type filters: iterable of str
@param other: Whether to return a list of filtered events as well.
@type other: L{bool}
@return: event numbers or 2-tuple of lists of event numbers.
@rtype: L{list} of L{int} or 2-L{tuple} of L{list} of L{int}
"""
events = [
dict(count=0),
dict(count=1),
dict(count=2),
dict(count=3),
]
class Filters(object):
@staticmethod
def twoMinus(event):
"""
count <= 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] <= 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] <= 2:
return PredicateResult.yes
return PredicateResult.maybe
@staticmethod
def twoPlus(event):
"""
count >= 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] >= 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] >= 2:
return PredicateResult.yes
return PredicateResult.maybe
@staticmethod
def notTwo(event):
"""
count != 2
@param event: an event
@type event: dict
@return: L{PredicateResult.yes} if C{event["count"] != 2},
otherwise L{PredicateResult.maybe}.
"""
if event["count"] == 2:
return PredicateResult.no
return PredicateResult.maybe
@staticmethod
def no(event):
"""
No way, man.
@param event: an event
@type event: dict
@return: L{PredicateResult.no}
"""
return PredicateResult.no
@staticmethod
def bogus(event):
"""
Bogus result.
@param event: an event
@type event: dict
@return: something other than a valid predicate result.
"""
return None
predicates = (getattr(Filters, f) for f in filters)
eventsSeen = []
eventsNotSeen = []
trackingObserver = eventsSeen.append
if other:
extra = [eventsNotSeen.append]
else:
extra = []
filteringObserver = FilteringLogObserver(
trackingObserver, predicates, *extra
)
for e in events:
filteringObserver(e)
if extra:
return (
[e["count"] for e in eventsSeen],
[e["count"] for e in eventsNotSeen],
)
return [e["count"] for e in eventsSeen]
def test_shouldLogEventNoFilters(self):
"""
No filters: all events come through.
"""
self.assertEqual(self.filterWith([]), [0, 1, 2, 3])
def test_shouldLogEventNoFilter(self):
"""
Filter with negative predicate result.
"""
self.assertEqual(self.filterWith(["notTwo"]), [0, 1, 3])
def test_shouldLogEventOtherObserver(self):
"""
Filtered results get sent to the other observer, if passed.
"""
self.assertEqual(self.filterWith(["notTwo"], True), ([0, 1, 3], [2]))
def test_shouldLogEventYesFilter(self):
"""
Filter with positive predicate result.
"""
self.assertEqual(self.filterWith(["twoPlus"]), [0, 1, 2, 3])
def test_shouldLogEventYesNoFilter(self):
"""
Series of filters with positive and negative predicate results.
"""
self.assertEqual(self.filterWith(["twoPlus", "no"]), [2, 3])
def test_shouldLogEventYesYesNoFilter(self):
"""
Series of filters with positive, positive and negative predicate
results.
"""
self.assertEqual(
self.filterWith(["twoPlus", "twoMinus", "no"]),
[0, 1, 2, 3]
)
def test_shouldLogEventBadPredicateResult(self):
"""
Filter with invalid predicate result.
"""
self.assertRaises(TypeError, self.filterWith, ["bogus"])
def test_call(self):
"""
Test filtering results from each predicate type.
"""
e = dict(obj=object())
def callWithPredicateResult(result):
seen = []
observer = FilteringLogObserver(
lambda e: seen.append(e),
(lambda e: result,)
)
observer(e)
return seen
self.assertIn(e, callWithPredicateResult(PredicateResult.yes))
self.assertIn(e, callWithPredicateResult(PredicateResult.maybe))
self.assertNotIn(e, callWithPredicateResult(PredicateResult.no))
def test_trace(self):
"""
Tracing keeps track of forwarding through the filtering observer.
"""
event = dict(log_trace=[])
oYes = lambda e: None
oNo = lambda e: None
def testObserver(e):
self.assertIs(e, event)
self.assertEqual(
event["log_trace"],
[
(publisher, yesFilter),
(yesFilter, oYes),
(publisher, noFilter),
# ... noFilter doesn't call oNo
(publisher, oTest),
]
)
oTest = testObserver
yesFilter = FilteringLogObserver(
oYes,
(lambda e: PredicateResult.yes,)
)
noFilter = FilteringLogObserver(
oNo,
(lambda e: PredicateResult.no,)
)
publisher = LogPublisher(yesFilter, noFilter, testObserver)
publisher(event)
class LogLevelFilterPredicateTests(unittest.TestCase):
"""
Tests for L{LogLevelFilterPredicate}.
"""
def test_defaultLogLevel(self):
"""
Default log level is used.
"""
predicate = LogLevelFilterPredicate()
self.assertEqual(
predicate.logLevelForNamespace(None),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace(""),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("rocker.cool.namespace"),
predicate.defaultLogLevel
)
def test_setLogLevel(self):
"""
Setting and retrieving log levels.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace(None, LogLevel.error)
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.warn)
self.assertEqual(
predicate.logLevelForNamespace(None),
LogLevel.error
)
self.assertEqual(
predicate.logLevelForNamespace("twisted"),
LogLevel.error
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2"),
LogLevel.debug
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav"),
LogLevel.warn
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test"),
LogLevel.warn
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test1.test2"),
LogLevel.warn
)
def test_setInvalidLogLevel(self):
"""
Can't pass invalid log levels to C{setLogLevelForNamespace()}.
"""
predicate = LogLevelFilterPredicate()
self.assertRaises(
InvalidLogLevelError,
predicate.setLogLevelForNamespace, "twext.web2", object()
)
# Level must be a constant, not the name of a constant
self.assertRaises(
InvalidLogLevelError,
predicate.setLogLevelForNamespace, "twext.web2", "debug"
)
def test_clearLogLevels(self):
"""
Clearing log levels.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.error)
predicate.clearLogLevels()
self.assertEqual(
predicate.logLevelForNamespace("twisted"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test"),
predicate.defaultLogLevel
)
self.assertEqual(
predicate.logLevelForNamespace("twext.web2.dav.test1.test2"),
predicate.defaultLogLevel
)
def test_filtering(self):
"""
Events are filtered based on log level/namespace.
"""
predicate = LogLevelFilterPredicate()
predicate.setLogLevelForNamespace(None, LogLevel.error)
predicate.setLogLevelForNamespace("twext.web2", LogLevel.debug)
predicate.setLogLevelForNamespace("twext.web2.dav", LogLevel.warn)
def checkPredicate(namespace, level, expectedResult):
event = dict(log_namespace=namespace, log_level=level)
self.assertEqual(expectedResult, predicate(event))
checkPredicate("", LogLevel.debug, PredicateResult.no)
checkPredicate("", LogLevel.error, PredicateResult.maybe)
checkPredicate("twext.web2", LogLevel.debug, PredicateResult.maybe)
checkPredicate("twext.web2", LogLevel.error, PredicateResult.maybe)
checkPredicate("twext.web2.dav", LogLevel.debug, PredicateResult.no)
checkPredicate("twext.web2.dav", LogLevel.error, PredicateResult.maybe)
checkPredicate(None, LogLevel.critical, PredicateResult.no)
checkPredicate("twext.web2", None, PredicateResult.no)

View file

@@ -0,0 +1,323 @@
# -*- coding: utf-8 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._flatten}.
"""
from itertools import count
import json
try:
from time import tzset
# We should upgrade to a version of pyflakes that does not require this.
tzset
except ImportError:
tzset = None
from twisted.trial import unittest
from .._format import formatEvent
from .._flatten import (
flattenEvent, extractField, KeyFlattener, aFormatter
)
class FlatFormattingTests(unittest.TestCase):
"""
Tests for flattened event formatting functions.
"""
def test_formatFlatEvent(self):
"""
L{flattenEvent} will "flatten" an event so that, if scrubbed of all but
serializable objects, it will preserve all necessary data to be
formatted once serialized. When presented with an event thusly
flattened, L{formatEvent} will produce the same output.
"""
counter = count()
class Ephemeral(object):
attribute = "value"
event1 = dict(
log_format=(
"callable: {callme()} "
"attribute: {object.attribute} "
"numrepr: {number!r} "
"numstr: {number!s} "
"strrepr: {string!r} "
"unistr: {unistr!s}"
),
callme=lambda: next(counter), object=Ephemeral(),
number=7, string="hello", unistr=u"ö"
)
flattenEvent(event1)
event2 = dict(event1)
del event2["callme"]
del event2["object"]
event3 = json.loads(json.dumps(event2))
self.assertEqual(
formatEvent(event3),
(
u"callable: 0 "
"attribute: value "
"numrepr: 7 "
"numstr: 7 "
"strrepr: 'hello' "
u"unistr: ö"
)
)
def test_formatFlatEventBadFormat(self):
"""
If the format string is invalid, an error is produced.
"""
event1 = dict(
log_format=(
"strrepr: {string!X}"
),
string="hello",
)
flattenEvent(event1)
event2 = json.loads(json.dumps(event1))
self.assertTrue(
formatEvent(event2).startswith(u"Unable to format event")
)
def test_formatFlatEventWithMutatedFields(self):
"""
L{formatEvent} will prefer the stored C{str()} or C{repr()} value for
an object, in case the object has been mutated since the event was flattened.
"""
class Unpersistable(object):
"""
Unpersistable object.
"""
destructed = False
def selfDestruct(self):
"""
Self destruct.
"""
self.destructed = True
def __repr__(self):
if self.destructed:
return "post-serialization garbage"
else:
return "un-persistable"
up = Unpersistable()
event1 = dict(
log_format="unpersistable: {unpersistable}", unpersistable=up
)
flattenEvent(event1)
up.selfDestruct()
self.assertEqual(formatEvent(event1), "unpersistable: un-persistable")
def test_keyFlattening(self):
"""
Test that L{KeyFlattener.flatKey} returns the expected keys for format
fields.
"""
def keyFromFormat(format):
for (
literalText,
fieldName,
formatSpec,
conversion,
) in aFormatter.parse(format):
return KeyFlattener().flatKey(
fieldName, formatSpec, conversion
)
# No name
try:
self.assertEqual(keyFromFormat("{}"), "!:")
except ValueError:
# In python 2.6, an empty field name causes Formatter.parse to
# raise ValueError.
# In Python 2.7, it's allowed, so this exception is unexpected.
raise
# Just a name
self.assertEqual(keyFromFormat("{foo}"), "foo!:")
# Add conversion
self.assertEqual(keyFromFormat("{foo!s}"), "foo!s:")
self.assertEqual(keyFromFormat("{foo!r}"), "foo!r:")
# Add format spec
self.assertEqual(keyFromFormat("{foo:%s}"), "foo!:%s")
self.assertEqual(keyFromFormat("{foo:!}"), "foo!:!")
self.assertEqual(keyFromFormat("{foo::}"), "foo!::")
# Both
self.assertEqual(keyFromFormat("{foo!s:%s}"), "foo!s:%s")
self.assertEqual(keyFromFormat("{foo!s:!}"), "foo!s:!")
self.assertEqual(keyFromFormat("{foo!s::}"), "foo!s::")
[keyPlusLiteral] = aFormatter.parse("{x}")
key = keyPlusLiteral[1:]
sameFlattener = KeyFlattener()
self.assertEqual(sameFlattener.flatKey(*key), "x!:")
self.assertEqual(sameFlattener.flatKey(*key), "x!:/2")
def _test_formatFlatEvent_fieldNamesSame(self, event=None):
"""
The same format field used twice in one event is rendered twice.
@param event: An event to flatten. If L{None}, create a new event.
@return: C{event} or the event created.
"""
if event is None:
counter = count()
class CountStr(object):
"""
Hack
"""
def __str__(self):
return str(next(counter))
event = dict(
log_format="{x} {x}",
x=CountStr(),
)
flattenEvent(event)
self.assertEqual(formatEvent(event), u"0 1")
return event
def test_formatFlatEventFieldNamesSame(self):
"""
The same format field used twice in one event is rendered twice.
"""
self._test_formatFlatEvent_fieldNamesSame()
def test_formatFlatEventFieldNamesSameAgain(self):
"""
The same event flattened twice gives the same (already rendered)
result.
"""
event = self._test_formatFlatEvent_fieldNamesSame()
self._test_formatFlatEvent_fieldNamesSame(event)
def test_formatEventFlatTrailingText(self):
"""
L{formatEvent} will handle a flattened event with tailing text after
a replacement field.
"""
event = dict(
log_format="test {x} trailing",
x='value',
)
flattenEvent(event)
result = formatEvent(event)
self.assertEqual(result, u"test value trailing")
def test_extractField(self, flattenFirst=lambda x: x):
"""
L{extractField} will extract a field used in the format string.
@param flattenFirst: callable to flatten an event
"""
class ObjectWithRepr(object):
def __repr__(self):
return "repr"
class Something(object):
def __init__(self):
self.number = 7
self.object = ObjectWithRepr()
def __getstate__(self):
raise NotImplementedError("Just in case.")
event = dict(
log_format="{something.number} {something.object}",
something=Something(),
)
flattened = flattenFirst(event)
def extract(field):
return extractField(field, flattened)
self.assertEqual(extract("something.number"), 7)
self.assertEqual(extract("something.number!s"), "7")
self.assertEqual(extract("something.object!s"), "repr")
def test_extractFieldFlattenFirst(self):
"""
L{extractField} behaves identically if the event is explicitly
flattened first.
"""
def flattened(evt):
flattenEvent(evt)
return evt
self.test_extractField(flattened)
def test_flattenEventWithoutFormat(self):
"""
L{flattenEvent} will do nothing to an event with no format string.
"""
inputEvent = {'a': 'b', 'c': 1}
flattenEvent(inputEvent)
self.assertEqual(inputEvent, {'a': 'b', 'c': 1})
def test_flattenEventWithInertFormat(self):
"""
L{flattenEvent} will do nothing to an event with a format string that
contains no format fields.
"""
inputEvent = {'a': 'b', 'c': 1, 'log_format': 'simple message'}
flattenEvent(inputEvent)
self.assertEqual(
inputEvent,
{
'a': 'b',
'c': 1,
'log_format': 'simple message',
}
)
def test_flattenEventWithNoneFormat(self):
"""
L{flattenEvent} will do nothing to an event with log_format set to
None.
"""
inputEvent = {'a': 'b', 'c': 1, 'log_format': None}
flattenEvent(inputEvent)
self.assertEqual(
inputEvent,
{
'a': 'b',
'c': 1,
'log_format': None,
}
)
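The flatten-then-serialize round trip from C{test_formatFlatEvent}, outside the test harness; a sketch (the C{Request} class and path are illustrative)::

    import json

    from twisted.logger import formatEvent
    from twisted.logger._flatten import flattenEvent  # private, as imported above

    class Request(object):
        path = "/index.html"

    event = dict(log_format="GET {request.path}", request=Request())
    flattenEvent(event)    # capture formatted fields while the object is live
    del event["request"]
    wire = json.loads(json.dumps(event))
    assert formatEvent(wire) == u"GET /index.html"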

View file

@@ -0,0 +1,783 @@
# -*- coding: utf-8 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._format}.
"""
from twisted.python.test.test_tzhelper import mktime, addTZCleanup, setTZ
try:
from time import tzset
# We should upgrade to a version of pyflakes that does not require this.
tzset
except ImportError:
tzset = None
from twisted.trial import unittest
from twisted.trial.unittest import SkipTest
from twisted.python.compat import _PY3, unicode
from .._levels import LogLevel
from .._format import (
formatEvent, formatUnformattableEvent, formatTime,
formatEventAsClassicLogText, formatWithCall, eventAsText
)
from twisted.python.failure import Failure
class FormattingTests(unittest.TestCase):
"""
Tests for basic event formatting functions.
"""
def test_formatEvent(self):
"""
L{formatEvent} will format an event according to several rules:
- A string with no formatting instructions will be passed straight
through.
- PEP 3101 strings will be formatted using the keys and values of
the event as named fields.
- PEP 3101 keys ending with C{()} will be treated as instructions
to call that key (which ought to be a callable) before
formatting.
L{formatEvent} will always return L{unicode}, and if given bytes, will
always treat its format string as UTF-8 encoded.
"""
def format(logFormat, **event):
event["log_format"] = logFormat
result = formatEvent(event)
self.assertIs(type(result), unicode)
return result
self.assertEqual(u"", format(b""))
self.assertEqual(u"", format(u""))
self.assertEqual(u"abc", format("{x}", x="abc"))
self.assertEqual(
u"no, yes.",
format(
"{not_called}, {called()}.",
not_called="no", called=lambda: "yes"
)
)
self.assertEqual(u"S\xe1nchez", format(b"S\xc3\xa1nchez"))
badResult = format(b"S\xe1nchez")
self.assertIn(u"Unable to format event", badResult)
maybeResult = format(b"S{a!s}nchez", a=b"\xe1")
# The behavior of unicode.format("{x}", x=bytes) differs on py2 and
# py3. Perhaps we should make our modified formatting more consistent
# than this? -glyph
if not _PY3:
self.assertIn(u"Unable to format event", maybeResult)
else:
self.assertIn(u"Sb'\\xe1'nchez", maybeResult)
xe1 = unicode(repr(b"\xe1"))
self.assertIn(u"S" + xe1 + "nchez", format(b"S{a!r}nchez", a=b"\xe1"))
def test_formatEventNoFormat(self):
"""
Formatting an event with no format.
"""
event = dict(foo=1, bar=2)
result = formatEvent(event)
self.assertEqual(u"", result)
def test_formatEventWeirdFormat(self):
"""
Formatting an event with a bogus format.
"""
event = dict(log_format=object(), foo=1, bar=2)
result = formatEvent(event)
self.assertIn("Log format must be unicode or bytes", result)
self.assertIn(repr(event), result)
def test_formatUnformattableEvent(self):
"""
Formatting an event that's just plain out to get us.
"""
event = dict(log_format="{evil()}", evil=lambda: 1 / 0)
result = formatEvent(event)
self.assertIn("Unable to format event", result)
self.assertIn(repr(event), result)
def test_formatUnformattableEventWithUnformattableKey(self):
"""
Formatting an unformattable event that has an unformattable key.
"""
event = {
"log_format": "{evil()}",
"evil": lambda: 1 / 0,
Unformattable(): "gurk",
}
result = formatEvent(event)
self.assertIn("MESSAGE LOST: unformattable object logged:", result)
self.assertIn("Recoverable data:", result)
self.assertIn("Exception during formatting:", result)
def test_formatUnformattableEventWithUnformattableValue(self):
"""
Formatting an unformattable event that has an unformattable value.
"""
event = dict(
log_format="{evil()}",
evil=lambda: 1 / 0,
gurk=Unformattable(),
)
result = formatEvent(event)
self.assertIn("MESSAGE LOST: unformattable object logged:", result)
self.assertIn("Recoverable data:", result)
self.assertIn("Exception during formatting:", result)
def test_formatUnformattableEventWithUnformattableErrorOMGWillItStop(self):
"""
Formatting an unformattable event that has an unformattable value.
"""
event = dict(
log_format="{evil()}",
evil=lambda: 1 / 0,
recoverable="okay",
)
# Call formatUnformattableEvent() directly with a bogus exception.
result = formatUnformattableEvent(event, Unformattable())
self.assertIn("MESSAGE LOST: unformattable object logged:", result)
self.assertIn(repr("recoverable") + " = " + repr("okay"), result)
class TimeFormattingTests(unittest.TestCase):
"""
Tests for time formatting functions.
"""
def setUp(self):
addTZCleanup(self)
def test_formatTimeWithDefaultFormat(self):
"""
Default time stamp format is RFC 3339 and offset respects the timezone
as set by the standard C{TZ} environment variable and L{tzset} API.
"""
if tzset is None:
raise SkipTest(
"Platform cannot change timezone; unable to verify offsets."
)
def testForTimeZone(name, expectedDST, expectedSTD):
setTZ(name)
localDST = mktime((2006, 6, 30, 0, 0, 0, 4, 181, 1))
localSTD = mktime((2007, 1, 31, 0, 0, 0, 2, 31, 0))
self.assertEqual(formatTime(localDST), expectedDST)
self.assertEqual(formatTime(localSTD), expectedSTD)
# UTC
testForTimeZone(
"UTC+00",
u"2006-06-30T00:00:00+0000",
u"2007-01-31T00:00:00+0000",
)
# West of UTC
testForTimeZone(
"EST+05EDT,M4.1.0,M10.5.0",
u"2006-06-30T00:00:00-0400",
u"2007-01-31T00:00:00-0500",
)
# East of UTC
testForTimeZone(
"CEST-01CEDT,M4.1.0,M10.5.0",
u"2006-06-30T00:00:00+0200",
u"2007-01-31T00:00:00+0100",
)
# No DST
testForTimeZone(
"CST+06",
u"2006-06-30T00:00:00-0600",
u"2007-01-31T00:00:00-0600",
)
def test_formatTimeWithNoTime(self):
"""
If C{when} argument is L{None}, we get the default output.
"""
self.assertEqual(formatTime(None), u"-")
self.assertEqual(formatTime(None, default=u"!"), u"!")
def test_formatTimeWithNoFormat(self):
"""
If C{timeFormat} argument is L{None}, we get the default output.
"""
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
self.assertEqual(formatTime(t, timeFormat=None), u"-")
self.assertEqual(formatTime(t, timeFormat=None, default=u"!"), u"!")
def test_formatTimeWithAlternateTimeFormat(self):
"""
Alternate time format in output.
"""
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
self.assertEqual(formatTime(t, timeFormat="%Y/%W"), u"2013/38")
def test_formatTimePercentF(self):
"""
"%f" supported in time format.
"""
self.assertEqual(formatTime(1000000.23456, timeFormat="%f"), u"234560")
class ClassicLogFormattingTests(unittest.TestCase):
"""
Tests for classic text log event formatting functions.
"""
def test_formatTimeDefault(self):
"""
Time is first field. Default time stamp format is RFC 3339 and offset
respects the timezone as set by the standard C{TZ} environment variable
and L{tzset} API.
"""
if tzset is None:
raise SkipTest(
"Platform cannot change timezone; unable to verify offsets."
)
addTZCleanup(self)
setTZ("UTC+00")
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = dict(log_format=u"XYZZY", log_time=t)
self.assertEqual(
formatEventAsClassicLogText(event),
u"2013-09-24T11:40:47+0000 [-\x23-] XYZZY\n",
)
def test_formatTimeCustom(self):
"""
Time is first field. Custom formatting function is an optional
argument.
"""
formatTime = lambda t: u"__{0}__".format(t)
event = dict(log_format=u"XYZZY", log_time=12345)
self.assertEqual(
formatEventAsClassicLogText(event, formatTime=formatTime),
u"__12345__ [-\x23-] XYZZY\n",
)
def test_formatNamespace(self):
"""
Namespace is first part of second field.
"""
event = dict(log_format=u"XYZZY", log_namespace="my.namespace")
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [my.namespace\x23-] XYZZY\n",
)
def test_formatLevel(self):
"""
Level is second part of second field.
"""
event = dict(log_format=u"XYZZY", log_level=LogLevel.warn)
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [-\x23warn] XYZZY\n",
)
def test_formatSystem(self):
"""
System is second field.
"""
event = dict(log_format=u"XYZZY", log_system=u"S.Y.S.T.E.M.")
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [S.Y.S.T.E.M.] XYZZY\n",
)
def test_formatSystemRulz(self):
"""
System is not supplanted by namespace and level.
"""
event = dict(
log_format=u"XYZZY",
log_namespace="my.namespace",
log_level=LogLevel.warn,
log_system=u"S.Y.S.T.E.M.",
)
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [S.Y.S.T.E.M.] XYZZY\n",
)
def test_formatSystemUnformattable(self):
"""
An unformattable C{"log_system"} value is rendered as C{"UNFORMATTABLE"}.
"""
event = dict(log_format=u"XYZZY", log_system=Unformattable())
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [UNFORMATTABLE] XYZZY\n",
)
def test_formatFormat(self):
"""
Formatted event is last field.
"""
event = dict(log_format=u"id:{id}", id="123")
self.assertEqual(
formatEventAsClassicLogText(event),
u"- [-\x23-] id:123\n",
)
def test_formatNoFormat(self):
"""
No format string.
"""
event = dict(id="123")
self.assertIs(
formatEventAsClassicLogText(event),
None
)
def test_formatEmptyFormat(self):
"""
Empty format string.
"""
event = dict(log_format="", id="123")
self.assertIs(
formatEventAsClassicLogText(event),
None
)
def test_formatFormatMultiLine(self):
"""
If the formatted event has newlines, indent additional lines.
"""
event = dict(log_format=u'XYZZY\nA hollow voice says:\n"Plugh"')
self.assertEqual(
formatEventAsClassicLogText(event),
u'- [-\x23-] XYZZY\n\tA hollow voice says:\n\t"Plugh"\n',
)
class FormatFieldTests(unittest.TestCase):
"""
Tests for format field functions.
"""
def test_formatWithCall(self):
"""
L{formatWithCall} is an extended version of L{unicode.format} that
will interpret a set of parentheses "C{()}" at the end of a format key
to mean that the format key ought to be I{called} rather than
stringified.
"""
self.assertEqual(
formatWithCall(
u"Hello, {world}. {callme()}.",
dict(world="earth", callme=lambda: "maybe")
),
"Hello, earth. maybe."
)
self.assertEqual(
formatWithCall(
u"Hello, {repr()!r}.",
dict(repr=lambda: "repr")
),
"Hello, 'repr'."
)
class Unformattable(object):
"""
An object that raises an exception from C{__repr__}.
"""
def __repr__(self):
return str(1 / 0)
class CapturedError(Exception):
"""
A captured error for use in format tests.
"""
class EventAsTextTests(unittest.TestCase):
"""
Tests for L{eventAsText}, all of which ensure that the
returned type is UTF-8 decoded text.
"""
def test_eventWithTraceback(self):
"""
An event with a C{log_failure} key will have a traceback appended.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
event = {
"log_format": u"This is a test log message"
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIn(unicode(f.getTraceback()), eventText)
self.assertIn(u'This is a test log message', eventText)
def test_formatEmptyEventWithTraceback(self):
"""
An event with an empty C{log_format} key appends a traceback from
the accompanying failure.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
event = {
"log_format": u""
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIn(unicode(f.getTraceback()), eventText)
self.assertIn(u'This is a fake error', eventText)
def test_formatUnformattableWithTraceback(self):
"""
An event with an unformattable value in the C{log_format} key still
has a traceback appended.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
event = {
"log_format": "{evil()}",
"evil": lambda: 1 / 0,
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIsInstance(eventText, unicode)
self.assertIn(unicode(f.getTraceback()), eventText)
self.assertIn(u'This is a fake error', eventText)
def test_formatUnformattableErrorWithTraceback(self):
"""
An event with an unformattable value in the C{log_format} key, that
throws an exception when __repr__ is invoked still has a traceback
appended.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
event = {
"log_format": "{evil()}",
"evil": lambda: 1 / 0,
Unformattable(): "gurk",
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIsInstance(eventText, unicode)
self.assertIn(u'MESSAGE LOST', eventText)
self.assertIn(unicode(f.getTraceback()), eventText)
self.assertIn(u'This is a fake error', eventText)
def test_formatEventUnformattableTraceback(self):
"""
If a traceback cannot be appended, a message indicating this is true
is appended.
"""
event = {
"log_format": u""
}
event["log_failure"] = object()
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIsInstance(eventText, unicode)
self.assertIn(u"(UNABLE TO OBTAIN TRACEBACK FROM EVENT)", eventText)
def test_formatEventNonCritical(self):
"""
An event with no C{log_failure} key will not have a traceback appended.
"""
event = {
"log_format": u"This is a test log message"
}
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIsInstance(eventText, unicode)
self.assertIn(u'This is a test log message', eventText)
def test_formatTracebackMultibyte(self):
"""
An exception message with multibyte characters is properly handled.
"""
try:
raise CapturedError('€')
except CapturedError:
f = Failure()
event = {
"log_format": u"This is a test log message"
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIn(u'€', eventText)
self.assertIn(u'Traceback', eventText)
def test_formatTracebackHandlesUTF8DecodeFailure(self):
"""
An error raised while attempting to decode the UTF-8 bytes still produces a
valid log message.
"""
try:
# 'test' in utf-16
raise CapturedError(b'\xff\xfet\x00e\x00s\x00t\x00')
except CapturedError:
f = Failure()
event = {
"log_format": u"This is a test log message"
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeSystem=False
)
self.assertIn(u'Traceback', eventText)
if not _PY3:
self.assertIn(u'\ufffd\ufffdt\x00e\x00s\x00t\x00', eventText)
else:
self.assertIn(
r"CapturedError(b'\xff\xfet\x00e\x00s\x00t\x00')",
eventText
)
def test_eventAsTextSystemOnly(self):
"""
If includeSystem is specified as the only option no timestamp or
traceback are printed.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = {
"log_format": u"ABCD",
"log_system": u"fake_system",
"log_time": t,
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=False,
includeTraceback=False,
includeSystem=True,
)
self.assertEqual(
eventText,
u"[fake_system] ABCD",
)
def test_eventAsTextTimestampOnly(self):
"""
If includeTimestamp is specified as the only option no system or
traceback are printed.
"""
if tzset is None:
raise SkipTest(
"Platform cannot change timezone; unable to verify offsets."
)
addTZCleanup(self)
setTZ("UTC+00")
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = {
"log_format": u"ABCD",
"log_system": u"fake_system",
"log_time": t,
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=True,
includeTraceback=False,
includeSystem=False,
)
self.assertEqual(
eventText,
u"2013-09-24T11:40:47+0000 ABCD",
)
def test_eventAsTextSystemMissing(self):
"""
If includeSystem is specified with a missing system [-#-]
is used.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = {
"log_format": u"ABCD",
"log_time": t,
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=False,
includeTraceback=False,
includeSystem=True,
)
self.assertEqual(
eventText,
u"[-\x23-] ABCD",
)
def test_eventAsTextSystemMissingNamespaceAndLevel(self):
"""
If includeSystem is specified with a missing system but
namespace and level are present they are used.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = {
"log_format": u"ABCD",
"log_time": t,
"log_level": LogLevel.info,
"log_namespace": u'test',
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=False,
includeTraceback=False,
includeSystem=True,
)
self.assertEqual(
eventText,
u"[test\x23info] ABCD",
)
def test_eventAsTextSystemMissingLevelOnly(self):
"""
If includeSystem is specified with a missing system but
level is present, level is included.
"""
try:
raise CapturedError("This is a fake error")
except CapturedError:
f = Failure()
t = mktime((2013, 9, 24, 11, 40, 47, 1, 267, 1))
event = {
"log_format": u"ABCD",
"log_time": t,
"log_level": LogLevel.info,
}
event["log_failure"] = f
eventText = eventAsText(
event,
includeTimestamp=False,
includeTraceback=False,
includeSystem=True,
)
self.assertEqual(
eventText,
u"[-\x23info] ABCD",
)
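The classic-text layout and the C{eventAsText} switches exercised above, in plain use; a sketch (the namespace, key, and message are illustrative)::

    from time import time

    from twisted.logger import (
        LogLevel, eventAsText, formatEventAsClassicLogText,
    )

    event = dict(
        log_format=u"cache miss for {key}",
        key=u"user:42",
        log_level=LogLevel.warn,
        log_namespace=u"myapp.cache",
        log_time=time(),
    )
    # Full classic line: u"<timestamp> [myapp.cache#warn] cache miss for user:42\n"
    formatEventAsClassicLogText(event)
    # Message only, with no timestamp or system prefix:
    eventAsText(event, includeTimestamp=False, includeSystem=False)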

View file

@@ -0,0 +1,372 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._global}.
"""
from __future__ import print_function
import io
from twisted.trial import unittest
from .._file import textFileLogObserver
from .._observer import LogPublisher
from .._logger import Logger
from .._global import LogBeginner
from .._global import MORE_THAN_ONCE_WARNING
from .._levels import LogLevel
from ..test.test_stdlib import nextLine
from twisted.python.failure import Failure
def compareEvents(test, actualEvents, expectedEvents):
"""
Compare two sequences of log events, examining only the keys which are
present in both.
@param test: a test case doing the comparison
@type test: L{unittest.TestCase}
@param actualEvents: A list of log events that were emitted by a logger.
@type actualEvents: L{list} of L{dict}
@param expectedEvents: A list of log events that were expected by a test.
@type expectedEvents: L{list} of L{dict}
"""
if len(actualEvents) != len(expectedEvents):
test.assertEqual(actualEvents, expectedEvents)
allMergedKeys = set()
for event in expectedEvents:
allMergedKeys |= set(event.keys())
def simplify(event):
copy = event.copy()
for key in event.keys():
if key not in allMergedKeys:
copy.pop(key)
return copy
simplifiedActual = [simplify(event) for event in actualEvents]
test.assertEqual(simplifiedActual, expectedEvents)
class LogBeginnerTests(unittest.TestCase):
"""
Tests for L{LogBeginner}.
"""
def setUp(self):
self.publisher = LogPublisher()
self.errorStream = io.StringIO()
class NotSys(object):
stdout = object()
stderr = object()
class NotWarnings(object):
def __init__(self):
self.warnings = []
def showwarning(
self, message, category, filename, lineno,
file=None, line=None
):
"""
Emulate warnings.showwarning.
@param message: A warning message to emit.
@type message: L{str}
@param category: A warning category to associate with
C{message}.
@type category: L{warnings.Warning}
@param filename: A file name for the source code file issuing
the warning.
@type filename: L{str}
@param lineno: A line number in the source file where the
warning was issued.
@type lineno: L{int}
@param file: A file to write the warning message to. If
L{None}, write to L{sys.stderr}.
@type file: file-like object
@param line: A line of source code to include with the warning
message. If L{None}, attempt to read the line from
C{filename} and C{lineno}.
@type line: L{str}
"""
self.warnings.append(
(message, category, filename, lineno, file, line)
)
self.sysModule = NotSys()
self.warningsModule = NotWarnings()
self.beginner = LogBeginner(
self.publisher, self.errorStream, self.sysModule,
self.warningsModule
)
def test_beginLoggingToAddObservers(self):
"""
Test that C{beginLoggingTo()} adds observers.
"""
event = dict(foo=1, bar=2)
events1 = []
events2 = []
o1 = lambda e: events1.append(e)
o2 = lambda e: events2.append(e)
self.beginner.beginLoggingTo((o1, o2))
self.publisher(event)
self.assertEqual([event], events1)
self.assertEqual([event], events2)
def test_beginLoggingToBufferedEvents(self):
"""
Test that events are buffered until C{beginLoggingTo()} is
called.
"""
event = dict(foo=1, bar=2)
events1 = []
events2 = []
o1 = lambda e: events1.append(e)
o2 = lambda e: events2.append(e)
self.publisher(event) # Before beginLoggingTo; this is buffered
self.beginner.beginLoggingTo((o1, o2))
self.assertEqual([event], events1)
self.assertEqual([event], events2)
def _bufferLimitTest(self, limit, beginner):
"""
Verify that when more than C{limit} events are logged to L{LogBeginner},
only the last C{limit} are replayed by L{LogBeginner.beginLoggingTo}.
@param limit: The maximum number of events the log beginner should
buffer.
@type limit: L{int}
@param beginner: The L{LogBeginner} against which to verify.
@type beginner: L{LogBeginner}
@raise: C{self.failureException} if the wrong events are replayed by
C{beginner}.
@return: L{None}
"""
for count in range(limit + 1):
self.publisher(dict(count=count))
events = []
beginner.beginLoggingTo([events.append])
self.assertEqual(
list(range(1, limit + 1)),
list(event["count"] for event in events),
)
def test_defaultBufferLimit(self):
"""
Up to C{LogBeginner._DEFAULT_BUFFER_SIZE} log events are buffered for
replay by L{LogBeginner.beginLoggingTo}.
"""
limit = LogBeginner._DEFAULT_BUFFER_SIZE
self._bufferLimitTest(limit, self.beginner)
def test_overrideBufferLimit(self):
"""
The size of the L{LogBeginner} event buffer can be overridden with the
C{initialBufferSize} initializer argument.
"""
limit = 3
beginner = LogBeginner(
self.publisher, self.errorStream, self.sysModule,
self.warningsModule, initialBufferSize=limit,
)
self._bufferLimitTest(limit, beginner)
def test_beginLoggingToTwice(self):
"""
When invoked twice, L{LogBeginner.beginLoggingTo} will emit a log
message warning the user that they previously began logging, and add
the new log observers.
"""
events1 = []
events2 = []
fileHandle = io.StringIO()
textObserver = textFileLogObserver(fileHandle)
self.publisher(dict(event="prebuffer"))
firstFilename, firstLine = nextLine()
self.beginner.beginLoggingTo([events1.append, textObserver])
self.publisher(dict(event="postbuffer"))
secondFilename, secondLine = nextLine()
self.beginner.beginLoggingTo([events2.append, textObserver])
self.publisher(dict(event="postwarn"))
warning = dict(
log_format=MORE_THAN_ONCE_WARNING,
log_level=LogLevel.warn,
fileNow=secondFilename, lineNow=secondLine,
fileThen=firstFilename, lineThen=firstLine
)
compareEvents(
self, events1,
[
dict(event="prebuffer"),
dict(event="postbuffer"),
warning,
dict(event="postwarn")
]
)
compareEvents(self, events2, [warning, dict(event="postwarn")])
output = fileHandle.getvalue()
self.assertIn('<{0}:{1}>'.format(firstFilename, firstLine),
output)
self.assertIn('<{0}:{1}>'.format(secondFilename, secondLine),
output)
def test_criticalLogging(self):
"""
Critical messages will be written as text to the error stream.
"""
log = Logger(observer=self.publisher)
log.info("ignore this")
log.critical("a critical {message}", message="message")
self.assertEqual(self.errorStream.getvalue(), u"a critical message\n")
def test_criticalLoggingStops(self):
"""
Once logging has begun with C{beginLoggingTo}, critical messages are no
longer written to the output stream.
"""
log = Logger(observer=self.publisher)
self.beginner.beginLoggingTo(())
log.critical("another critical message")
self.assertEqual(self.errorStream.getvalue(), u"")
def test_beginLoggingToRedirectStandardIO(self):
"""
L{LogBeginner.beginLoggingTo} will re-direct the standard output and
error streams by setting the C{stdout} and C{stderr} attributes on its
sys module object.
"""
x = []
self.beginner.beginLoggingTo([x.append])
print("Hello, world.", file=self.sysModule.stdout)
compareEvents(
self, x, [dict(log_namespace="stdout", log_io="Hello, world.")]
)
del x[:]
print("Error, world.", file=self.sysModule.stderr)
compareEvents(
self, x, [dict(log_namespace="stderr", log_io="Error, world.")]
)
def test_beginLoggingToDontRedirect(self):
"""
L{LogBeginner.beginLoggingTo} will leave the existing stdout/stderr in
place if it has been told not to replace them.
"""
oldOut = self.sysModule.stdout
oldErr = self.sysModule.stderr
self.beginner.beginLoggingTo((), redirectStandardIO=False)
self.assertIs(self.sysModule.stdout, oldOut)
self.assertIs(self.sysModule.stderr, oldErr)
def test_beginLoggingToPreservesEncoding(self):
"""
When L{LogBeginner.beginLoggingTo} redirects stdout/stderr streams, the
replacement streams will preserve the encoding of the replaced streams,
to minimally disrupt any application relying on a specific encoding.
"""
weird = io.TextIOWrapper(io.BytesIO(), "shift-JIS")
weirderr = io.TextIOWrapper(io.BytesIO(), "big5")
self.sysModule.stdout = weird
self.sysModule.stderr = weirderr
x = []
self.beginner.beginLoggingTo([x.append])
self.assertEqual(self.sysModule.stdout.encoding, "shift-JIS")
self.assertEqual(self.sysModule.stderr.encoding, "big5")
self.sysModule.stdout.write(b"\x97\x9B\n")
self.sysModule.stderr.write(b"\xBC\xFC\n")
compareEvents(
self, x, [dict(log_io=u"\u674e"), dict(log_io=u"\u7469")]
)
def test_warningsModule(self):
"""
L{LogBeginner.beginLoggingTo} will redirect the warnings of its
warnings module into the logging system.
"""
self.warningsModule.showwarning(
"a message", DeprecationWarning, __file__, 1
)
x = []
self.beginner.beginLoggingTo([x.append])
self.warningsModule.showwarning(
"another message", DeprecationWarning, __file__, 2
)
f = io.StringIO()
self.warningsModule.showwarning(
"yet another", DeprecationWarning, __file__, 3, file=f
)
self.assertEqual(
self.warningsModule.warnings,
[
("a message", DeprecationWarning, __file__, 1, None, None),
("yet another", DeprecationWarning, __file__, 3, f, None),
]
)
compareEvents(
self, x,
[dict(
warning="another message",
category=(
DeprecationWarning.__module__ + "." +
DeprecationWarning.__name__
),
filename=__file__, lineno=2,
)]
)
def test_failuresAppendTracebacks(self):
"""
The string resulting from a logged failure contains a traceback.
"""
f = Failure(Exception("this is not the behavior you are looking for"))
log = Logger(observer=self.publisher)
log.failure('a failure', failure=f)
msg = self.errorStream.getvalue()
self.assertIn('a failure', msg)
self.assertIn('this is not the behavior you are looking for', msg)
self.assertIn('Traceback', msg)

View file

@ -0,0 +1,274 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._io}.
"""
from __future__ import print_function
import sys
from twisted.trial import unittest
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher
from .._io import LoggingFile
class LoggingFileTests(unittest.TestCase):
"""
Tests for L{LoggingFile}.
"""
def setUp(self):
"""
Create a logger for test L{LoggingFile} instances to use.
"""
self.publisher = LogPublisher()
self.logger = Logger(observer=self.publisher)
def test_softspace(self):
"""
L{LoggingFile.softspace} is 0.
"""
self.assertEqual(LoggingFile.softspace, 0)
def test_readOnlyAttributes(self):
"""
Some L{LoggingFile} attributes are read-only.
"""
f = LoggingFile(self.logger)
self.assertRaises(AttributeError, setattr, f, "closed", True)
self.assertRaises(AttributeError, setattr, f, "encoding", "utf-8")
self.assertRaises(AttributeError, setattr, f, "mode", "r")
self.assertRaises(AttributeError, setattr, f, "newlines", ["\n"])
self.assertRaises(AttributeError, setattr, f, "name", "foo")
def test_unsupportedMethods(self):
"""
Some L{LoggingFile} methods are unsupported.
"""
f = LoggingFile(self.logger)
self.assertRaises(IOError, f.read)
self.assertRaises(IOError, f.next)
self.assertRaises(IOError, f.readline)
self.assertRaises(IOError, f.readlines)
self.assertRaises(IOError, f.xreadlines)
self.assertRaises(IOError, f.seek)
self.assertRaises(IOError, f.tell)
self.assertRaises(IOError, f.truncate)
def test_level(self):
"""
Default level is L{LogLevel.info} if not set.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.level, LogLevel.info)
f = LoggingFile(self.logger, level=LogLevel.error)
self.assertEqual(f.level, LogLevel.error)
def test_encoding(self):
"""
Default encoding is C{sys.getdefaultencoding()} if not set.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.encoding, sys.getdefaultencoding())
f = LoggingFile(self.logger, encoding="utf-8")
self.assertEqual(f.encoding, "utf-8")
def test_mode(self):
"""
Reported mode is C{"w"}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.mode, "w")
def test_newlines(self):
"""
The C{newlines} attribute is L{None}.
"""
f = LoggingFile(self.logger)
self.assertIsNone(f.newlines)
def test_name(self):
"""
The C{name} attribute is fixed.
"""
f = LoggingFile(self.logger)
self.assertEqual(
f.name,
"<LoggingFile twisted.logger.test.test_io#info>"
)
def test_close(self):
"""
L{LoggingFile.close} closes the file.
"""
f = LoggingFile(self.logger)
f.close()
self.assertTrue(f.closed)
self.assertRaises(ValueError, f.write, "Hello")
def test_flush(self):
"""
L{LoggingFile.flush} does nothing.
"""
f = LoggingFile(self.logger)
f.flush()
def test_fileno(self):
"""
L{LoggingFile.fileno} returns C{-1}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.fileno(), -1)
def test_isatty(self):
"""
L{LoggingFile.isatty} returns C{False}.
"""
f = LoggingFile(self.logger)
self.assertFalse(f.isatty())
def test_writeBuffering(self):
"""
Writing buffers correctly.
"""
f = self.observedFile()
f.write("Hello")
self.assertEqual(f.messages, [])
f.write(", world!\n")
self.assertEqual(f.messages, [u"Hello, world!"])
f.write("It's nice to meet you.\n\nIndeed.")
self.assertEqual(
f.messages,
[
u"Hello, world!",
u"It's nice to meet you.",
u"",
]
)
def test_writeBytesDecoded(self):
"""
Bytes are decoded to unicode.
"""
f = self.observedFile(encoding="utf-8")
f.write(b"Hello, Mr. S\xc3\xa1nchez\n")
self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])
def test_writeUnicode(self):
"""
Unicode is unmodified.
"""
f = self.observedFile(encoding="utf-8")
f.write(u"Hello, Mr. S\xe1nchez\n")
self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])
def test_writeLevel(self):
"""
Log level is emitted properly.
"""
f = self.observedFile()
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_level"], LogLevel.info)
f = self.observedFile(level=LogLevel.error)
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_level"], LogLevel.error)
def test_writeFormat(self):
"""
Log format is C{u"{log_io}"}.
"""
f = self.observedFile()
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_format"], u"{log_io}")
def test_writelinesBuffering(self):
"""
C{writelines} does not add newlines.
"""
# Note this is different behavior than t.p.log.StdioOnnaStick.
f = self.observedFile()
f.writelines(("Hello", ", ", ""))
self.assertEqual(f.messages, [])
f.writelines(("world!\n",))
self.assertEqual(f.messages, [u"Hello, world!"])
f.writelines(("It's nice to meet you.\n\n", "Indeed."))
self.assertEqual(
f.messages,
[
u"Hello, world!",
u"It's nice to meet you.",
u"",
]
)
def test_print(self):
"""
L{LoggingFile} can replace L{sys.stdout}.
"""
f = self.observedFile()
self.patch(sys, "stdout", f)
print("Hello,", end=" ")
print("world.")
self.assertEqual(f.messages, [u"Hello, world."])
def observedFile(self, **kwargs):
"""
Construct a L{LoggingFile} with a built-in observer.
@param kwargs: keyword arguments for the L{LoggingFile}.
@type kwargs: L{dict}
@return: a L{LoggingFile} with an observer that appends received
events into the file's C{events} attribute (a L{list}) and
event messages into the file's C{messages} attribute (a L{list}).
@rtype: L{LoggingFile}
"""
def observer(event):
f.events.append(event)
if "log_io" in event:
f.messages.append(event["log_io"])
log = Logger(observer=observer)
f = LoggingFile(logger=log, **kwargs)
f.events = []
f.messages = []
return f
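# Illustrative sketch (added comment, not part of the original module): how the
# observedFile() helper above is typically used inside a test method:
#
#     f = self.observedFile(encoding="utf-8")
#     f.write(u"hi\n")
#     # f.messages == [u"hi"]                  -- one entry per completed line
#     # f.events[0]["log_format"] == u"{log_io}"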

View file

@ -0,0 +1,567 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.logger._json}.
"""
from io import StringIO, BytesIO
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.python.compat import unicode, _PYPY, _PY3
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
from .._observer import ILogObserver, LogPublisher
from .._format import formatEvent
from .._levels import LogLevel
from .._flatten import extractField
from .._global import globalLogPublisher
from .._json import (
eventAsJSON, eventFromJSON, jsonFileLogObserver, eventsFromJSONLogFile,
log as jsonLog
)
from .._logger import Logger
def savedJSONInvariants(testCase, savedJSON):
"""
Assert a few things about the result of L{eventAsJSON}, then return it.
@param testCase: The L{TestCase} with which to perform the assertions.
@type testCase: L{TestCase}
@param savedJSON: The result of L{eventAsJSON}.
@type savedJSON: L{unicode} (we hope)
@return: C{savedJSON}
@rtype: L{unicode}
@raise AssertionError: If any of the preconditions fail.
"""
testCase.assertIsInstance(savedJSON, unicode)
testCase.assertEqual(savedJSON.count("\n"), 0)
return savedJSON
class SaveLoadTests(TestCase):
"""
Tests for loading and saving log events.
"""
def savedEventJSON(self, event):
"""
Serialize an event, assert some things about the result, and return the
JSON.
@param event: An event.
@type event: L{dict}
@return: JSON.
"""
return savedJSONInvariants(self, eventAsJSON(event))
def test_simpleSaveLoad(self):
"""
Saving and loading an empty dictionary results in an empty dictionary.
"""
self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})
def test_saveLoad(self):
"""
Saving and loading a dictionary with some simple values in it results
in those same simple values in the output; according to JSON's rules,
though, all dictionary keys must be L{unicode} and any non-L{unicode}
keys will be converted.
"""
self.assertEqual(
eventFromJSON(self.savedEventJSON({1: 2, u"3": u"4"})),
{u"1": 2, u"3": u"4"}
)
def test_saveUnPersistable(self):
"""
Saving and loading an object which cannot be represented in JSON will
result in a placeholder.
"""
self.assertEqual(
eventFromJSON(self.savedEventJSON({u"1": 2, u"3": object()})),
{u"1": 2, u"3": {u"unpersistable": True}}
)
def test_saveNonASCII(self):
"""
Non-ASCII keys and values can be saved and loaded.
"""
self.assertEqual(
eventFromJSON(self.savedEventJSON(
{u"\u1234": u"\u4321", u"3": object()}
)),
{u"\u1234": u"\u4321", u"3": {u"unpersistable": True}}
)
def test_saveBytes(self):
"""
Any L{bytes} objects will be saved as if they are latin-1 so they can
be faithfully re-loaded.
"""
def asbytes(x):
if bytes is str:
return b"".join(map(chr, x))
else:
return bytes(x)
inputEvent = {"hello": asbytes(range(255))}
if bytes is not str:
# On Python 3, bytes keys will be skipped by the JSON encoder. Not
# much we can do about that. Let's make sure that we don't get an
# error, though.
inputEvent.update({b"skipped": "okay"})
self.assertEqual(
eventFromJSON(self.savedEventJSON(inputEvent)),
{u"hello": asbytes(range(255)).decode("charmap")}
)
if _PYPY and _PY3:
test_saveBytes.skip = "https://bitbucket.org/pypy/pypy/issues/3052/"
def test_saveUnPersistableThenFormat(self):
"""
Saving and loading an object which cannot be represented in JSON, but
has a string representation which I{can} be saved as JSON, will result
in the same string formatting; any extractable fields will retain their
data types.
"""
class Reprable(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return("reprable")
inputEvent = {
"log_format": "{object} {object.value}",
"object": Reprable(7)
}
outputEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(formatEvent(outputEvent), "reprable 7")
def test_extractingFieldsPostLoad(self):
"""
L{extractField} can extract fields from an object that's been saved and
loaded from JSON.
"""
class Obj(object):
def __init__(self):
self.value = 345
inputEvent = dict(log_format="{object.value}", object=Obj())
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(extractField("object.value", loadedEvent), 345)
# The behavior of extractField is consistent between pre-persistence
# and post-persistence events, although looking up the key directly
# won't be:
self.assertRaises(KeyError, extractField, "object", loadedEvent)
self.assertRaises(KeyError, extractField, "object", inputEvent)
def test_failureStructurePreserved(self):
"""
Round-tripping a failure through L{eventAsJSON} preserves its class and
structure.
"""
events = []
log = Logger(observer=events.append)
try:
1 / 0
except ZeroDivisionError:
f = Failure()
log.failure("a message about failure", f)
import sys
if sys.exc_info()[0] is not None:
# Make sure we don't get the same Failure by accident.
sys.exc_clear()
self.assertEqual(len(events), 1)
loaded = eventFromJSON(self.savedEventJSON(events[0]))['log_failure']
self.assertIsInstance(loaded, Failure)
self.assertTrue(loaded.check(ZeroDivisionError))
self.assertIsInstance(loaded.getTraceback(), str)
def test_saveLoadLevel(self):
"""
It's important that the C{log_level} key remain a
L{constantly.NamedConstant} object.
"""
inputEvent = dict(log_level=LogLevel.warn)
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertIs(loadedEvent["log_level"], LogLevel.warn)
def test_saveLoadUnknownLevel(self):
"""
If a saved bit of JSON (let's say, from a future version of Twisted)
were to persist a different log_level, it will resolve as None.
"""
loadedEvent = eventFromJSON(
'{"log_level": {"name": "other", '
'"__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}'
)
self.assertEqual(loadedEvent, dict(log_level=None))
class FileLogObserverTests(TestCase):
"""
Tests for L{jsonFileLogObserver}.
"""
def test_interface(self):
"""
A L{FileLogObserver} returned by L{jsonFileLogObserver} is an
L{ILogObserver}.
"""
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def assertObserverWritesJSON(self, **kwargs):
"""
Asserts that an observer created by L{jsonFileLogObserver} with the
given arguments writes events serialized as JSON text, using the given
record separator.
@param recordSeparator: A record separator.
@type recordSeparator: L{unicode}
@param kwargs: Keyword arguments to pass to L{jsonFileLogObserver}.
@type kwargs: L{dict}
"""
recordSeparator = kwargs.get("recordSeparator", u"\x1e")
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle, **kwargs)
event = dict(x=1)
observer(event)
self.assertEqual(
fileHandle.getvalue(),
u'{0}{{"x": 1}}\n'.format(recordSeparator)
)
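# Added note (not in the original): with the default record separator, the
# helper above expects the observer to have written exactly one line of the
# form u'\x1e{"x": 1}\n' -- an RS (0x1E) control character, the JSON object,
# and a trailing newline. Passing recordSeparator=u"" drops the leading RS.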
def test_observeWritesDefaultRecordSeparator(self):
"""
A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
serialized as JSON text to a file when it observes events.
By default, the record separator is C{u"\\x1e"}.
"""
self.assertObserverWritesJSON()
def test_observeWritesEmptyRecordSeparator(self):
"""
A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
serialized as JSON text to a file when it observes events.
This test sets the record separator to C{u""}.
"""
self.assertObserverWritesJSON(recordSeparator=u"")
def test_failureFormatting(self):
"""
A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures
serialized as JSON text to a file when it observes events.
"""
io = StringIO()
publisher = LogPublisher()
logged = []
publisher.addObserver(logged.append)
publisher.addObserver(jsonFileLogObserver(io))
logger = Logger(observer=publisher)
try:
1 / 0
except:
logger.failure("failed as expected")
reader = StringIO(io.getvalue())
deserialized = list(eventsFromJSONLogFile(reader))
def checkEvents(logEvents):
self.assertEqual(len(logEvents), 1)
[failureEvent] = logEvents
self.assertIn("log_failure", failureEvent)
failureObject = failureEvent["log_failure"]
self.assertIsInstance(failureObject, Failure)
tracebackObject = failureObject.getTracebackObject()
self.assertEqual(
tracebackObject.tb_frame.f_code.co_filename.rstrip("co"),
__file__.rstrip("co")
)
checkEvents(logged)
checkEvents(deserialized)
class LogFileReaderTests(TestCase):
"""
Tests for L{eventsFromJSONLogFile}.
"""
def setUp(self):
self.errorEvents = []
def observer(event):
if (
event["log_namespace"] == jsonLog.namespace and
"record" in event
):
self.errorEvents.append(event)
self.logObserver = observer
globalLogPublisher.addObserver(observer)
def tearDown(self):
globalLogPublisher.removeObserver(self.logObserver)
def _readEvents(self, fileHandle, **kwargs):
"""
Test that L{eventsFromJSONLogFile} reads two pre-defined events from a
file: C{{u"x": 1}} and C{{u"y": 2}}.
@param fileHandle: The file to read from.
@param kwargs: Keyword arguments to pass to L{eventsFromJSONLogFile}.
"""
events = eventsFromJSONLogFile(fileHandle, **kwargs)
self.assertEqual(next(events), {u"x": 1})
self.assertEqual(next(events), {u"y": 2})
self.assertRaises(StopIteration, next, events) # No more events
def test_readEventsAutoWithRecordSeparator(self):
"""
L{eventsFromJSONLogFile} reads events from a file and automatically
detects use of C{u"\\x1e"} as the record separator.
"""
with StringIO(
u'\x1e{"x": 1}\n'
u'\x1e{"y": 2}\n'
) as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
def test_readEventsAutoEmptyRecordSeparator(self):
"""
L{eventsFromJSONLogFile} reads events from a file and automatically
detects use of C{u""} as the record separator.
"""
with StringIO(
u'{"x": 1}\n'
u'{"y": 2}\n'
) as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
def test_readEventsExplicitRecordSeparator(self):
"""
L{eventsFromJSONLogFile} reads events from a file and is told to use
a specific record separator.
"""
# Use u"\x08" (backspace)... because that seems weird enough.
with StringIO(
u'\x08{"x": 1}\n'
u'\x08{"y": 2}\n'
) as fileHandle:
self._readEvents(fileHandle, recordSeparator=u"\x08")
self.assertEqual(len(self.errorEvents), 0)
def test_readEventsPartialBuffer(self):
"""
L{eventsFromJSONLogFile} handles buffering a partial event.
"""
with StringIO(
u'\x1e{"x": 1}\n'
u'\x1e{"y": 2}\n'
) as fileHandle:
# Use a buffer size smaller than the event text.
self._readEvents(fileHandle, bufferSize=1)
self.assertEqual(len(self.errorEvents), 0)
def test_readTruncated(self):
"""
If the JSON text for a record is truncated, skip it.
"""
with StringIO(
u'\x1e{"x": 1'
u'\x1e{"y": 2}\n'
) as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(next(events), {u"y": 2})
self.assertRaises(StopIteration, next, events) # No more events
# We should have logged the lost record
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(
self.errorEvents[0]["log_format"],
u"Unable to read truncated JSON record: {record!r}"
)
self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1')
def test_readUnicode(self):
"""
If the file being read from vends L{unicode}, strings decode from JSON
as-is.
"""
# The Euro currency sign is u"\u20ac"
with StringIO(u'\x1e{"currency": "\u20ac"}\n') as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(next(events), {u"currency": u"\u20ac"})
self.assertRaises(StopIteration, next, events) # No more events
self.assertEqual(len(self.errorEvents), 0)
def test_readUTF8Bytes(self):
"""
If the file being read from vends L{bytes}, strings decode from JSON as
UTF-8.
"""
# The Euro currency sign is b"\xe2\x82\xac" in UTF-8
with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
# The Euro currency sign is u"\u20ac"
self.assertEqual(next(events), {u"currency": u"\u20ac"})
self.assertRaises(StopIteration, next, events) # No more events
self.assertEqual(len(self.errorEvents), 0)
def test_readTruncatedUTF8Bytes(self):
"""
If the JSON text for a record is truncated in the middle of a multi-byte
UTF-8 sequence, no codec exception is raised, and the stream is read
properly once the additional data arrives.
"""
# The Euro currency sign is u"\u20ac" and encodes in UTF-8 as three
# bytes: b"\xe2\x82\xac".
with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle:
events = eventsFromJSONLogFile(fileHandle, bufferSize=8)
self.assertEqual(next(events), {u"x": u"\u20ac"}) # Got unicode
self.assertRaises(StopIteration, next, events) # No more events
self.assertEqual(len(self.errorEvents), 0)
def test_readInvalidUTF8Bytes(self):
"""
If the JSON text for a record contains invalid UTF-8 text, ignore that
record.
"""
# The string b"\xe2\xac" is bogus
with BytesIO(
b'\x1e{"x": "\xe2\xac"}\n'
b'\x1e{"y": 2}\n'
) as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(next(events), {u"y": 2})
self.assertRaises(StopIteration, next, events) # No more events
# We should have logged the lost record
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(
self.errorEvents[0]["log_format"],
u"Unable to decode UTF-8 for JSON record: {record!r}"
)
self.assertEqual(
self.errorEvents[0]["record"],
b'{"x": "\xe2\xac"}\n'
)
def test_readInvalidJSON(self):
"""
If the JSON text for a record is invalid, skip it.
"""
with StringIO(
u'\x1e{"x": }\n'
u'\x1e{"y": 2}\n'
) as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(next(events), {u"y": 2})
self.assertRaises(StopIteration, next, events) # No more events
# We should have logged the lost record
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(
self.errorEvents[0]["log_format"],
u"Unable to read JSON record: {record!r}"
)
self.assertEqual(self.errorEvents[0]["record"], b'{"x": }\n')
def test_readUnseparated(self):
"""
Multiple events without a record separator are skipped.
"""
with StringIO(
u'\x1e{"x": 1}\n'
u'{"y": 2}\n'
) as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertRaises(StopIteration, next, events) # No more events
# We should have logged the lost record
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(
self.errorEvents[0]["log_format"],
u"Unable to read JSON record: {record!r}"
)
self.assertEqual(
self.errorEvents[0]["record"],
b'{"x": 1}\n{"y": 2}\n'
)
def test_roundTrip(self):
"""
Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}
and read by L{eventsFromJSONLogFile} is reconstructed properly.
"""
event = dict(x=1)
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
observer(event)
fileHandle.seek(0)
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(tuple(events), (event,))
self.assertEqual(len(self.errorEvents), 0)

View file

@ -0,0 +1,472 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._legacy}.
"""
from time import time
import logging as py_logging
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from twisted.python import context
from twisted.python import log as legacyLog
from twisted.python.failure import Failure
from .._levels import LogLevel
from .._observer import ILogObserver
from .._format import formatEvent
from .._legacy import LegacyLogObserverWrapper
from .._legacy import publishToNewObserver
class LegacyLogObserverWrapperTests(unittest.TestCase):
"""
Tests for L{LegacyLogObserverWrapper}.
"""
def test_interface(self):
"""
L{LegacyLogObserverWrapper} is an L{ILogObserver}.
"""
legacyObserver = lambda e: None
observer = LegacyLogObserverWrapper(legacyObserver)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def test_repr(self):
"""
L{LegacyLogObserverWrapper} returns the expected string.
"""
class LegacyObserver(object):
def __repr__(self):
return "<Legacy Observer>"
def __call__(self):
return
observer = LegacyLogObserverWrapper(LegacyObserver())
self.assertEqual(
repr(observer),
"LegacyLogObserverWrapper(<Legacy Observer>)"
)
def observe(self, event):
"""
Send an event to a wrapped legacy observer and capture the event as
seen by that observer.
@param event: an event
@type event: L{dict}
@return: the event as observed by the legacy wrapper
"""
events = []
legacyObserver = lambda e: events.append(e)
observer = LegacyLogObserverWrapper(legacyObserver)
observer(event)
self.assertEqual(len(events), 1)
return events[0]
def forwardAndVerify(self, event):
"""
Send an event to a wrapped legacy observer and verify that its data is
preserved.
@param event: an event
@type event: L{dict}
@return: the event as observed by the legacy wrapper
"""
# Make sure keys that are expected by the logging system are present
event.setdefault("log_time", time())
event.setdefault("log_system", "-")
event.setdefault("log_level", LogLevel.info)
# Send a copy: don't mutate me, bro
observed = self.observe(dict(event))
# Don't expect modifications
for key, value in event.items():
self.assertIn(key, observed)
return observed
def test_forward(self):
"""
Basic forwarding: event keys as observed by a legacy observer are the
same.
"""
self.forwardAndVerify(dict(foo=1, bar=2))
def test_time(self):
"""
The new-style C{"log_time"} key is copied to the old-style C{"time"}
key.
"""
stamp = time()
event = self.forwardAndVerify(dict(log_time=stamp))
self.assertEqual(event["time"], stamp)
def test_timeAlreadySet(self):
"""
The new-style C{"log_time"} key does not step on a pre-existing
old-style C{"time"} key.
"""
stamp = time()
event = self.forwardAndVerify(dict(log_time=stamp + 1, time=stamp))
self.assertEqual(event["time"], stamp)
def test_system(self):
"""
The new-style C{"log_system"} key is copied to the old-style
C{"system"} key.
"""
event = self.forwardAndVerify(dict(log_system="foo"))
self.assertEqual(event["system"], "foo")
def test_systemAlreadySet(self):
"""
The new-style C{"log_system"} key does not step on a pre-existing
old-style C{"system"} key.
"""
event = self.forwardAndVerify(dict(log_system="foo", system="bar"))
self.assertEqual(event["system"], "bar")
def test_noSystem(self):
"""
If the new-style C{"log_system"} key is absent, the old-style
C{"system"} key is set to C{"-"}.
"""
# Don't use forwardAndVerify(), since that sets log_system.
event = dict(log_time=time(), log_level=LogLevel.info)
observed = self.observe(dict(event))
self.assertEqual(observed["system"], "-")
def test_levelNotChange(self):
"""
If explicitly set, the C{isError} key will be preserved when forwarding
from a new-style logging emitter to a legacy logging observer,
regardless of log level.
"""
self.forwardAndVerify(dict(log_level=LogLevel.info, isError=1))
self.forwardAndVerify(dict(log_level=LogLevel.warn, isError=1))
self.forwardAndVerify(dict(log_level=LogLevel.error, isError=0))
self.forwardAndVerify(dict(log_level=LogLevel.critical, isError=0))
def test_pythonLogLevelNotSet(self):
"""
The new-style C{"log_level"} key is not translated to the old-style
C{"logLevel"} key.
Events are forwarded from the old module to the new module and are
then seen by old-style observers.
We don't want to add unexpected keys to old-style events.
"""
event = self.forwardAndVerify(dict(log_level=LogLevel.info))
self.assertNotIn("logLevel", event)
def test_stringPythonLogLevel(self):
"""
If a stdlib log level was provided as a string (e.g. C{"WARNING"}) in
the legacy "logLevel" key, it does not get converted to a number.
The documentation suggested that numerical values should be used but
this was not a requirement.
"""
event = self.forwardAndVerify(dict(
logLevel="WARNING", # py_logging.WARNING is 30
))
self.assertEqual(event["logLevel"], "WARNING")
def test_message(self):
"""
The old-style C{"message"} key is added, even if no new-style
C{"log_format"} is given, as it is required, but may be empty.
"""
event = self.forwardAndVerify(dict())
self.assertEqual(event["message"], ()) # "message" is a tuple
def test_messageAlreadySet(self):
"""
The old-style C{"message"} key is not modified if it already exists.
"""
event = self.forwardAndVerify(dict(message=("foo", "bar")))
self.assertEqual(event["message"], ("foo", "bar"))
def test_format(self):
"""
Formatting is translated such that text is rendered correctly, even
though old-style logging doesn't use PEP 3101 formatting.
"""
event = self.forwardAndVerify(
dict(log_format="Hello, {who}!", who="world")
)
self.assertEqual(
legacyLog.textFromEventDict(event),
"Hello, world!"
)
def test_formatMessage(self):
"""
Using the message key, which is special in old-style, works for
new-style formatting.
"""
event = self.forwardAndVerify(
dict(log_format="Hello, {message}!", message="world")
)
self.assertEqual(
legacyLog.textFromEventDict(event),
"Hello, world!"
)
def test_formatAlreadySet(self):
"""
Formatting is not altered if the old-style C{"format"} key already
exists.
"""
event = self.forwardAndVerify(
dict(log_format="Hello!", format="Howdy!")
)
self.assertEqual(legacyLog.textFromEventDict(event), "Howdy!")
def eventWithFailure(self, **values):
"""
Create a new-style event with a captured failure.
@param values: Additional values to include in the event.
@type values: L{dict}
@return: the new event
@rtype: L{dict}
"""
failure = Failure(RuntimeError("nyargh!"))
return self.forwardAndVerify(dict(
log_failure=failure,
log_format="oopsie...",
**values
))
def test_failure(self):
"""
Captured failures in the new style set the old-style C{"failure"},
C{"isError"}, and C{"why"} keys.
"""
event = self.eventWithFailure()
self.assertIs(event["failure"], event["log_failure"])
self.assertTrue(event["isError"])
self.assertEqual(event["why"], "oopsie...")
def test_failureAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"failure"} key.
"""
failure = Failure(RuntimeError("Weak salsa!"))
event = self.eventWithFailure(failure=failure)
self.assertIs(event["failure"], failure)
def test_isErrorAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"isError"} key.
"""
event = self.eventWithFailure(isError=0)
self.assertEqual(event["isError"], 0)
def test_whyAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"why"} key.
"""
event = self.eventWithFailure(why="blah")
self.assertEqual(event["why"], "blah")
class PublishToNewObserverTests(unittest.TestCase):
"""
Tests for L{publishToNewObserver}.
"""
def setUp(self):
self.events = []
self.observer = self.events.append
def legacyEvent(self, *message, **values):
"""
Return a basic old-style event as would be created by L{legacyLog.msg}.
@param message: a message event value in the legacy event format
@type message: L{tuple} of L{bytes}
@param values: additional event values in the legacy event format
@type values: L{dict}
@return: a legacy event
"""
event = (context.get(legacyLog.ILogContext) or {}).copy()
event.update(values)
event["message"] = message
event["time"] = time()
if "isError" not in event:
event["isError"] = 0
return event
def test_observed(self):
"""
The observer is called exactly once.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(len(self.events), 1)
def test_time(self):
"""
The old-style C{"time"} key is copied to the new-style C{"log_time"}
key.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(
self.events[0]["log_time"], self.events[0]["time"]
)
def test_message(self):
"""
A published old-style event should format as text in the same way as
the given C{textFromEventDict} callable would format it.
"""
def textFromEventDict(event):
return "".join(reversed(" ".join(event["message"])))
event = self.legacyEvent("Hello,", "world!")
text = textFromEventDict(event)
publishToNewObserver(self.observer, event, textFromEventDict)
self.assertEqual(formatEvent(self.events[0]), text)
def test_defaultLogLevel(self):
"""
Published event should have a log level of L{LogLevel.info}.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.info)
def test_isError(self):
"""
If C{"isError"} is set to C{1} (true) on the legacy event, the
C{"log_level"} key should get set to L{LogLevel.critical}.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(isError=1),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.critical)
def test_stdlibLogLevel(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using a predefined (L{int}) constant, the new-style
C{"log_level"} key should get set to the corresponding log level.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel=py_logging.WARNING),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.warn)
def test_stdlibLogLevelWithString(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using a string value, the new-style C{"log_level"} key should
get set to the corresponding log level.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel="WARNING"),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.warn)
def test_stdlibLogLevelWithGarbage(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using an unknown value, the new-style C{"log_level"} key should
not get set.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel="Foo!!!!!"),
legacyLog.textFromEventDict
)
self.assertNotIn("log_level", self.events[0])
def test_defaultNamespace(self):
"""
Published event should have a namespace of C{"log_legacy"} to indicate
that it was forwarded from legacy logging.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_namespace"], "log_legacy")
def test_system(self):
"""
The old-style C{"system"} key is copied to the new-style
C{"log_system"} key.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(
self.events[0]["log_system"], self.events[0]["system"]
)

View file

@ -0,0 +1,38 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._levels}.
"""
from twisted.trial import unittest
from .._levels import InvalidLogLevelError
from .._levels import LogLevel
class LogLevelTests(unittest.TestCase):
"""
Tests for L{LogLevel}.
"""
def test_levelWithName(self):
"""
Look up log level by name.
"""
for level in LogLevel.iterconstants():
self.assertIs(LogLevel.levelWithName(level.name), level)
def test_levelWithInvalidName(self):
"""
You can't make up log level names.
"""
bogus = "*bogus*"
try:
LogLevel.levelWithName(bogus)
except InvalidLogLevelError as e:
self.assertIs(e.level, bogus)
else:
self.fail("Expected InvalidLogLevelError.")

View file

@ -0,0 +1,268 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._logger}.
"""
from twisted.trial import unittest
from .._levels import InvalidLogLevelError
from .._levels import LogLevel
from .._format import formatEvent
from .._logger import Logger
from .._global import globalLogPublisher
class TestLogger(Logger):
"""
L{Logger} with an overridden C{emit} method that keeps track of received
events.
"""
def emit(self, level, format=None, **kwargs):
def observer(event):
self.event = event
globalLogPublisher.addObserver(observer)
try:
Logger.emit(self, level, format, **kwargs)
finally:
globalLogPublisher.removeObserver(observer)
self.emitted = {
"level": level,
"format": format,
"kwargs": kwargs,
}
class LogComposedObject(object):
"""
A regular object, with a logger attached.
"""
log = TestLogger()
def __init__(self, state=None):
self.state = state
def __str__(self):
return "<LogComposedObject {state}>".format(state=self.state)
class LoggerTests(unittest.TestCase):
"""
Tests for L{Logger}.
"""
def test_repr(self):
"""
repr() on Logger
"""
namespace = "bleargh"
log = Logger(namespace)
self.assertEqual(repr(log), "<Logger {0}>".format(repr(namespace)))
def test_namespaceDefault(self):
"""
Default namespace is module name.
"""
log = Logger()
self.assertEqual(log.namespace, __name__)
def test_namespaceOMGItsTooHard(self):
"""
Default namespace is C{"<unknown>"} when a logger is created from a
context in which it can't be determined automatically and no namespace
was specified.
"""
result = []
exec(
"result.append(Logger())",
dict(Logger=Logger), locals(),
)
self.assertEqual(result[0].namespace, "<unknown>")
def test_namespaceAttribute(self):
"""
Default namespace for classes using L{Logger} as a descriptor is the
class name they were retrieved from.
"""
obj = LogComposedObject()
expectedNamespace = "{0}.{1}".format(
obj.__module__,
obj.__class__.__name__,
)
self.assertEqual(obj.log.namespace, expectedNamespace)
self.assertEqual(LogComposedObject.log.namespace, expectedNamespace)
self.assertIs(LogComposedObject.log.source, LogComposedObject)
self.assertIs(obj.log.source, obj)
self.assertIsNone(Logger().source)
def test_descriptorObserver(self):
"""
When used as a descriptor, the observer is propagated.
"""
observed = []
class MyObject(object):
log = Logger(observer=observed.append)
MyObject.log.info("hello")
self.assertEqual(len(observed), 1)
self.assertEqual(observed[0]['log_format'], "hello")
def test_sourceAvailableForFormatting(self):
"""
On instances that have a L{Logger} class attribute, the C{log_source}
key is available to format strings.
"""
obj = LogComposedObject("hello")
log = obj.log
log.error("Hello, {log_source}.")
self.assertIn("log_source", log.event)
self.assertEqual(log.event["log_source"], obj)
stuff = formatEvent(log.event)
self.assertIn("Hello, <LogComposedObject hello>.", stuff)
def test_basicLogger(self):
"""
Test that log levels and messages are emitted correctly for
Logger.
"""
log = TestLogger()
for level in LogLevel.iterconstants():
format = "This is a {level_name} message"
message = format.format(level_name=level.name)
logMethod = getattr(log, level.name)
logMethod(format, junk=message, level_name=level.name)
# Ensure that test_emit got called with expected arguments
self.assertEqual(log.emitted["level"], level)
self.assertEqual(log.emitted["format"], format)
self.assertEqual(log.emitted["kwargs"]["junk"], message)
self.assertTrue(hasattr(log, "event"), "No event observed.")
self.assertEqual(log.event["log_format"], format)
self.assertEqual(log.event["log_level"], level)
self.assertEqual(log.event["log_namespace"], __name__)
self.assertIsNone(log.event["log_source"])
self.assertEqual(log.event["junk"], message)
self.assertEqual(formatEvent(log.event), message)
def test_sourceOnClass(self):
"""
C{log_source} event key refers to the class.
"""
def observer(event):
self.assertEqual(event["log_source"], Thingo)
class Thingo(object):
log = TestLogger(observer=observer)
Thingo.log.info()
def test_sourceOnInstance(self):
"""
C{log_source} event key refers to the instance.
"""
def observer(event):
self.assertEqual(event["log_source"], thingo)
class Thingo(object):
log = TestLogger(observer=observer)
thingo = Thingo()
thingo.log.info()
def test_sourceUnbound(self):
"""
C{log_source} event key is L{None}.
"""
def observer(event):
self.assertIsNone(event["log_source"])
log = TestLogger(observer=observer)
log.info()
def test_defaultFailure(self):
"""
Test that log.failure() emits the right data.
"""
log = TestLogger()
try:
raise RuntimeError("baloney!")
except RuntimeError:
log.failure("Whoops")
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(log.emitted["level"], LogLevel.critical)
self.assertEqual(log.emitted["format"], "Whoops")
def test_conflictingKwargs(self):
"""
Make sure that kwargs conflicting with args don't pass through.
"""
log = TestLogger()
log.warn(
u"*",
log_format="#",
log_level=LogLevel.error,
log_namespace="*namespace*",
log_source="*source*",
)
self.assertEqual(log.event["log_format"], u"*")
self.assertEqual(log.event["log_level"], LogLevel.warn)
self.assertEqual(log.event["log_namespace"], log.namespace)
self.assertIsNone(log.event["log_source"])
def test_logInvalidLogLevel(self):
"""
Test passing in a bogus log level to C{emit()}.
"""
log = TestLogger()
log.emit("*bogus*")
errors = self.flushLoggedErrors(InvalidLogLevelError)
self.assertEqual(len(errors), 1)
def test_trace(self):
"""
Tracing keeps track of forwarding to the publisher.
"""
def publisher(event):
observer(event)
def observer(event):
self.assertEqual(event["log_trace"], [(log, publisher)])
log = TestLogger(observer=publisher)
log.info("Hello.", log_trace=[])

View file

@ -0,0 +1,190 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._observer}.
"""
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from .._logger import Logger
from .._observer import ILogObserver
from .._observer import LogPublisher
class LogPublisherTests(unittest.TestCase):
"""
Tests for L{LogPublisher}.
"""
def test_interface(self):
"""
L{LogPublisher} is an L{ILogObserver}.
"""
publisher = LogPublisher()
try:
verifyObject(ILogObserver, publisher)
except BrokenMethodImplementation as e:
self.fail(e)
def test_observers(self):
"""
L{LogPublisher.observers} returns the observers.
"""
o1 = lambda e: None
o2 = lambda e: None
publisher = LogPublisher(o1, o2)
self.assertEqual(set((o1, o2)), set(publisher._observers))
def test_addObserver(self):
"""
L{LogPublisher.addObserver} adds an observer.
"""
o1 = lambda e: None
o2 = lambda e: None
o3 = lambda e: None
publisher = LogPublisher(o1, o2)
publisher.addObserver(o3)
self.assertEqual(set((o1, o2, o3)), set(publisher._observers))
def test_addObserverNotCallable(self):
"""
L{LogPublisher.addObserver} refuses to add an observer that's
not callable.
"""
publisher = LogPublisher()
self.assertRaises(TypeError, publisher.addObserver, object())
def test_removeObserver(self):
"""
L{LogPublisher.removeObserver} removes an observer.
"""
o1 = lambda e: None
o2 = lambda e: None
o3 = lambda e: None
publisher = LogPublisher(o1, o2, o3)
publisher.removeObserver(o2)
self.assertEqual(set((o1, o3)), set(publisher._observers))
def test_removeObserverNotRegistered(self):
"""
L{LogPublisher.removeObserver} removes an observer that is not
registered.
"""
o1 = lambda e: None
o2 = lambda e: None
o3 = lambda e: None
publisher = LogPublisher(o1, o2)
publisher.removeObserver(o3)
self.assertEqual(set((o1, o2)), set(publisher._observers))
def test_fanOut(self):
"""
L{LogPublisher} calls its observers.
"""
event = dict(foo=1, bar=2)
events1 = []
events2 = []
events3 = []
o1 = lambda e: events1.append(e)
o2 = lambda e: events2.append(e)
o3 = lambda e: events3.append(e)
publisher = LogPublisher(o1, o2, o3)
publisher(event)
self.assertIn(event, events1)
self.assertIn(event, events2)
self.assertIn(event, events3)
def test_observerRaises(self):
"""
Observer raises an exception during fan out: a failure is logged, but
not re-raised. Life goes on.
"""
event = dict(foo=1, bar=2)
exception = RuntimeError("ARGH! EVIL DEATH!")
events = []
def observer(event):
shouldRaise = not events
events.append(event)
if shouldRaise:
raise exception
collector = []
publisher = LogPublisher(observer, collector.append)
publisher(event)
# Verify that the observer saw my event
self.assertIn(event, events)
# Verify that the observer raised my exception
errors = [
e["log_failure"] for e in collector
if "log_failure" in e
]
self.assertEqual(len(errors), 1)
self.assertIs(errors[0].value, exception)
# Make sure the exceptional observer does not receive its own error.
self.assertEqual(len(events), 1)
def test_observerRaisesAndLoggerHatesMe(self):
"""
Observer raises an exception during fan out and the publisher's Logger
pukes when the failure is reported. The exception does not propagate
back to the caller.
"""
event = dict(foo=1, bar=2)
exception = RuntimeError("ARGH! EVIL DEATH!")
def observer(event):
raise RuntimeError("Sad panda")
class GurkLogger(Logger):
def failure(self, *args, **kwargs):
raise exception
publisher = LogPublisher(observer)
publisher.log = GurkLogger()
publisher(event)
# Here, the lack of an exception thus far is a success, of sorts
def test_trace(self):
"""
Tracing keeps track of forwarding to observers.
"""
event = dict(foo=1, bar=2, log_trace=[])
traces = {}
# Copy trace to a tuple; otherwise, both observers will store the same
# mutable list, and we won't be able to see o1's view distinctly.
o1 = lambda e: traces.setdefault(1, tuple(e["log_trace"]))
o2 = lambda e: traces.setdefault(2, tuple(e["log_trace"]))
publisher = LogPublisher(o1, o2)
publisher(event)
self.assertEqual(traces[1], ((publisher, o1),))
self.assertEqual(traces[2], ((publisher, o1), (publisher, o2)))

View file

@ -0,0 +1,304 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._stdlib}.
"""
import sys
from io import BytesIO, TextIOWrapper
import logging as py_logging
from inspect import getsourcefile
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.python.compat import _PY3, currentframe
from twisted.python.failure import Failure
from twisted.trial import unittest
from .._levels import LogLevel
from .._observer import ILogObserver
from .._stdlib import STDLibLogObserver
def nextLine():
"""
Retrieve the file name and line number immediately after where this function
is called.
@return: the file name and line number
@rtype: 2-L{tuple} of L{str}, L{int}
"""
caller = currentframe(1)
return (getsourcefile(sys.modules[caller.f_globals['__name__']]),
caller.f_lineno + 1)
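# Illustrative sketch (added comment, not part of the original module): how
# nextLine() is used by the caller-info test below. The filename/line pair is
# captured immediately before the statement whose location should show up on
# the emitted stdlib log record:
#
#     filename, logLine = nextLine()
#     records, output = self.logEvent({})   # record should point at this line
#     self.assertEqual(records[0].lineno, logLine)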
class STDLibLogObserverTests(unittest.TestCase):
"""
Tests for L{STDLibLogObserver}.
"""
def test_interface(self):
"""
L{STDLibLogObserver} is an L{ILogObserver}.
"""
observer = STDLibLogObserver()
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def py_logger(self):
"""
Create a logging object we can use to test with.
@return: a stdlib-style logger
@rtype: L{StdlibLoggingContainer}
"""
logger = StdlibLoggingContainer()
self.addCleanup(logger.close)
return logger
def logEvent(self, *events):
"""
Send one or more events to Python's logging module, and
capture the emitted L{logging.LogRecord}s and output stream as
a string.
@param events: events
@type events: L{tuple} of L{dict}
@return: a tuple: (records, output)
@rtype: 2-tuple of (L{list} of L{logging.LogRecord}, L{bytes}.)
"""
pl = self.py_logger()
observer = STDLibLogObserver(
# Add 1 to default stack depth to skip *this* frame, since
# tests will want to know about their own frames.
stackDepth=STDLibLogObserver.defaultStackDepth + 1
)
for event in events:
observer(event)
return pl.bufferedHandler.records, pl.outputAsText()
def test_name(self):
"""
Logger name.
"""
records, output = self.logEvent({})
self.assertEqual(len(records), 1)
self.assertEqual(records[0].name, "twisted")
def test_levels(self):
"""
Log levels.
"""
levelMapping = {
None: py_logging.INFO, # Default
LogLevel.debug: py_logging.DEBUG,
LogLevel.info: py_logging.INFO,
LogLevel.warn: py_logging.WARNING,
LogLevel.error: py_logging.ERROR,
LogLevel.critical: py_logging.CRITICAL,
}
# Build a set of events for each log level
events = []
for level, pyLevel in levelMapping.items():
event = {}
# Set the log level on the event, except for default
if level is not None:
event["log_level"] = level
# Remember the Python log level we expect to see for this
# event (as an int)
event["py_levelno"] = int(pyLevel)
events.append(event)
records, output = self.logEvent(*events)
self.assertEqual(len(records), len(levelMapping))
# Check that each event has the correct level
for i in range(len(records)):
self.assertEqual(records[i].levelno, events[i]["py_levelno"])
def test_callerInfo(self):
"""
C{pathname}, C{lineno}, C{exc_info}, and C{func} are set properly on
records.
"""
filename, logLine = nextLine()
records, output = self.logEvent({})
self.assertEqual(len(records), 1)
self.assertEqual(records[0].pathname, filename)
self.assertEqual(records[0].lineno, logLine)
self.assertIsNone(records[0].exc_info)
# Attribute "func" is missing from record, which is weird because it's
# documented.
# self.assertEqual(records[0].func, "test_callerInfo")
def test_basicFormat(self):
"""
Basic formattable event passes the format along correctly.
"""
event = dict(log_format="Hello, {who}!", who="dude")
records, output = self.logEvent(event)
self.assertEqual(len(records), 1)
self.assertEqual(str(records[0].msg), u"Hello, dude!")
self.assertEqual(records[0].args, ())
def test_basicFormatRendered(self):
"""
Basic formattable event renders correctly.
"""
event = dict(log_format="Hello, {who}!", who="dude")
records, output = self.logEvent(event)
self.assertEqual(len(records), 1)
self.assertTrue(output.endswith(u":Hello, dude!\n"),
repr(output))
def test_noFormat(self):
"""
Event with no format.
"""
records, output = self.logEvent({})
self.assertEqual(len(records), 1)
self.assertEqual(str(records[0].msg), "")
def test_failure(self):
"""
An event with a failure logs the failure details as well.
"""
def failing_func():
1 / 0
try:
failing_func()
except ZeroDivisionError:
failure = Failure()
event = dict(log_format='Hi mom', who='me', log_failure=failure)
records, output = self.logEvent(event)
self.assertEqual(len(records), 1)
self.assertIn(u'Hi mom', output)
self.assertIn(u'in failing_func', output)
self.assertIn(u'ZeroDivisionError', output)
def test_cleanedFailure(self):
"""
A cleaned Failure object has a fake traceback object; make sure that
logging such a failure still results in the exception details being
logged.
"""
def failing_func():
1 / 0
try:
failing_func()
except ZeroDivisionError:
failure = Failure()
failure.cleanFailure()
event = dict(log_format='Hi mom', who='me', log_failure=failure)
records, output = self.logEvent(event)
self.assertEqual(len(records), 1)
self.assertIn(u'Hi mom', output)
self.assertIn(u'in failing_func', output)
self.assertIn(u'ZeroDivisionError', output)
class StdlibLoggingContainer(object):
"""
Container for a test configuration of stdlib logging objects.
"""
def __init__(self):
self.rootLogger = py_logging.getLogger("")
self.originalLevel = self.rootLogger.getEffectiveLevel()
self.rootLogger.setLevel(py_logging.DEBUG)
self.bufferedHandler = BufferedHandler()
self.rootLogger.addHandler(self.bufferedHandler)
self.streamHandler, self.output = handlerAndBytesIO()
self.rootLogger.addHandler(self.streamHandler)
def close(self):
"""
Close the logger.
"""
self.rootLogger.setLevel(self.originalLevel)
self.rootLogger.removeHandler(self.bufferedHandler)
self.rootLogger.removeHandler(self.streamHandler)
self.streamHandler.close()
self.output.close()
def outputAsText(self):
"""
Get the output to the underlying stream as text.
@return: the output text
@rtype: L{unicode}
"""
return self.output.getvalue().decode("utf-8")
def handlerAndBytesIO():
"""
Construct a 2-tuple of C{(StreamHandler, BytesIO)} for testing interaction
with the 'logging' module.
@return: handler and io object
@rtype: tuple of L{StreamHandler} and L{io.BytesIO}
"""
output = BytesIO()
stream = output
template = py_logging.BASIC_FORMAT
if _PY3:
stream = TextIOWrapper(output, encoding="utf-8", newline="\n")
formatter = py_logging.Formatter(template)
handler = py_logging.StreamHandler(stream)
handler.setFormatter(formatter)
return handler, output
class BufferedHandler(py_logging.Handler):
"""
A L{py_logging.Handler} that remembers all logged records in a list.
"""
def __init__(self):
"""
Initialize this L{BufferedHandler}.
"""
py_logging.Handler.__init__(self)
self.records = []
def emit(self, record):
"""
Remember the record.
"""
self.records.append(record)

View file

@ -0,0 +1,107 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._util}.
"""
from twisted.trial import unittest
from .._observer import LogPublisher
from .._util import formatTrace
class UtilTests(unittest.TestCase):
"""
Utility tests.
"""
def test_trace(self):
"""
Tracing keeps track of forwarding done by the publisher.
"""
publisher = LogPublisher()
event = dict(log_trace=[])
o1 = lambda e: None
def o2(e):
self.assertIs(e, event)
self.assertEqual(
e["log_trace"],
[
(publisher, o1),
(publisher, o2),
# Event hasn't been sent to o3 yet
]
)
def o3(e):
self.assertIs(e, event)
self.assertEqual(
e["log_trace"],
[
(publisher, o1),
(publisher, o2),
(publisher, o3),
]
)
publisher.addObserver(o1)
publisher.addObserver(o2)
publisher.addObserver(o3)
publisher(event)
def test_formatTrace(self):
"""
Format trace as string.
"""
event = dict(log_trace=[])
def noOp(e):
pass
o1, o2, o3, o4, o5 = noOp, noOp, noOp, noOp, noOp
o1.name = "root/o1"
o2.name = "root/p1/o2"
o3.name = "root/p1/o3"
o4.name = "root/p1/p2/o4"
o5.name = "root/o5"
def testObserver(e):
self.assertIs(e, event)
trace = formatTrace(e["log_trace"])
self.assertEqual(
trace,
(
u"{root} ({root.name})\n"
u" -> {o1} ({o1.name})\n"
u" -> {p1} ({p1.name})\n"
u" -> {o2} ({o2.name})\n"
u" -> {o3} ({o3.name})\n"
u" -> {p2} ({p2.name})\n"
u" -> {o4} ({o4.name})\n"
u" -> {o5} ({o5.name})\n"
u" -> {oTest}\n"
).format(
root=root,
o1=o1, o2=o2, o3=o3, o4=o4, o5=o5,
p1=p1, p2=p2,
oTest=oTest
)
)
oTest = testObserver
p2 = LogPublisher(o4)
p1 = LogPublisher(o2, o3, p2)
p2.name = "root/p1/p2/"
p1.name = "root/p1/"
root = LogPublisher(o1, p1, o5, oTest)
root.name = "root/"
root(event)