Output of the new DB entries

This commit is contained in:
hubobel 2022-01-02 21:50:48 +01:00
parent bad48e1627
commit cfbbb9ee3d
2399 changed files with 843193 additions and 43 deletions

View file

@@ -0,0 +1,6 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet}.
"""

View file

@@ -0,0 +1,221 @@
# -*- test-case-name: twisted.internet.test.test_coroutines -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for C{await} support in Deferreds.
These tests can only work and be imported on Python 3.5+!
"""
import types
from twisted.python.failure import Failure
from twisted.internet.defer import (
Deferred, maybeDeferred, ensureDeferred, fail
)
from twisted.trial.unittest import TestCase
from twisted.internet.task import Clock
class SampleException(Exception):
"""
A specific sample exception for testing.
"""
class AwaitTests(TestCase):
"""
Tests for using Deferreds in conjunction with PEP-492.
"""
def test_awaitReturnsIterable(self):
"""
C{Deferred.__await__} returns an iterable.
"""
d = Deferred()
awaitedDeferred = d.__await__()
self.assertEqual(awaitedDeferred, iter(awaitedDeferred))
def test_ensureDeferred(self):
"""
L{ensureDeferred} will turn a coroutine into a L{Deferred}.
"""
async def run():
d = Deferred()
d.callback("bar")
await d
res = await run2()
return res
async def run2():
d = Deferred()
d.callback("foo")
res = await d
return res
# It's a coroutine...
r = run()
self.assertIsInstance(r, types.CoroutineType)
# Now it's a Deferred.
d = ensureDeferred(r)
self.assertIsInstance(d, Deferred)
# The Deferred has the result we want.
res = self.successResultOf(d)
self.assertEqual(res, "foo")
def test_basic(self):
"""
L{ensureDeferred} allows a function to C{await} on a L{Deferred}.
"""
async def run():
d = Deferred()
d.callback("foo")
res = await d
return res
d = ensureDeferred(run())
res = self.successResultOf(d)
self.assertEqual(res, "foo")
def test_exception(self):
"""
An exception in a coroutine wrapped with L{ensureDeferred} will cause
the returned L{Deferred} to fire with a failure.
"""
async def run():
d = Deferred()
d.callback("foo")
await d
raise ValueError("Oh no!")
d = ensureDeferred(run())
res = self.failureResultOf(d)
self.assertEqual(type(res.value), ValueError)
self.assertEqual(res.value.args, ("Oh no!",))
def test_synchronousDeferredFailureTraceback(self):
"""
When a Deferred is awaited upon that has already failed with a Failure
that has a traceback, both the place that the synchronous traceback
comes from and the awaiting line are shown in the traceback.
"""
def raises():
raise SampleException()
it = maybeDeferred(raises)
async def doomed():
return await it
failure = self.failureResultOf(ensureDeferred(doomed()))
self.assertIn(", in doomed\n", failure.getTraceback())
self.assertIn(", in raises\n", failure.getTraceback())
def test_asyncDeferredFailureTraceback(self):
"""
When a Deferred is awaited upon that later fails with a Failure that
has a traceback, both the place that the synchronous traceback comes
from and the awaiting line are shown in the traceback.
"""
def returnsFailure():
try:
raise SampleException()
except SampleException:
return Failure()
it = Deferred()
async def doomed():
return await it
started = ensureDeferred(doomed())
self.assertNoResult(started)
it.errback(returnsFailure())
failure = self.failureResultOf(started)
self.assertIn(", in doomed\n", failure.getTraceback())
self.assertIn(", in returnsFailure\n", failure.getTraceback())
def test_twoDeep(self):
"""
A coroutine wrapped with L{ensureDeferred} that awaits a L{Deferred}
suspends its execution until the inner L{Deferred} fires.
"""
reactor = Clock()
sections = []
async def runone():
sections.append(2)
d = Deferred()
reactor.callLater(1, d.callback, 2)
await d
sections.append(3)
return "Yay!"
async def run():
sections.append(1)
result = await runone()
sections.append(4)
d = Deferred()
reactor.callLater(1, d.callback, 1)
await d
sections.append(5)
return result
d = ensureDeferred(run())
reactor.advance(0.9)
self.assertEqual(sections, [1, 2])
reactor.advance(0.1)
self.assertEqual(sections, [1, 2, 3, 4])
reactor.advance(0.9)
self.assertEqual(sections, [1, 2, 3, 4])
reactor.advance(0.1)
self.assertEqual(sections, [1, 2, 3, 4, 5])
res = self.successResultOf(d)
self.assertEqual(res, "Yay!")
def test_reraise(self):
"""
Awaiting an already failed Deferred will raise the exception.
"""
async def test():
try:
await fail(ValueError("Boom"))
except ValueError as e:
self.assertEqual(e.args, ("Boom",))
return 1
return 0
res = self.successResultOf(ensureDeferred(test()))
self.assertEqual(res, 1)
def test_chained(self):
"""
Awaiting a paused & chained Deferred will give the result when it has
one.
"""
reactor = Clock()
async def test():
d = Deferred()
d2 = Deferred()
d.addCallback(lambda ignored: d2)
d.callback(None)
reactor.callLater(0, d2.callback, "bye")
return await d
d = ensureDeferred(test())
reactor.advance(0.1)
res = self.successResultOf(d)
self.assertEqual(res, "bye")
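Taken together, the pattern these await tests exercise is small; the following is only an illustrative sketch of it using the public APIs imported above (Deferred, ensureDeferred), not part of the committed file:
from twisted.internet.defer import Deferred, ensureDeferred

async def greet():
    d = Deferred()
    d.callback("hello")   # fire synchronously so the sketch completes immediately
    return await d        # awaiting a fired Deferred resumes with its result

wrapped = ensureDeferred(greet())   # coroutine -> Deferred
wrapped.addCallback(print)          # prints "hello"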

View file

@@ -0,0 +1,177 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POSIX implementation of local network interface enumeration.
"""
from __future__ import division, absolute_import
import sys, socket
from socket import AF_INET, AF_INET6, inet_ntop
from ctypes import (
CDLL, POINTER, Structure, c_char_p, c_ushort, c_int,
c_uint32, c_uint8, c_void_p, c_ubyte, pointer, cast)
from ctypes.util import find_library
from twisted.python.compat import _PY3, nativeString
if _PY3:
# Once #6070 is implemented, this can be replaced with the implementation
# from that ticket:
def chr(i):
"""
Python 3 implementation of Python 2 chr(), i.e. convert an integer to
corresponding byte.
"""
return bytes([i])
libc = CDLL(find_library("c"))
if sys.platform.startswith('freebsd') or sys.platform == 'darwin':
_sockaddrCommon = [
("sin_len", c_uint8),
("sin_family", c_uint8),
]
else:
_sockaddrCommon = [
("sin_family", c_ushort),
]
class in_addr(Structure):
_fields_ = [
("in_addr", c_ubyte * 4),
]
class in6_addr(Structure):
_fields_ = [
("in_addr", c_ubyte * 16),
]
class sockaddr(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
]
class sockaddr_in(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
("sin_addr", in_addr),
]
class sockaddr_in6(Structure):
_fields_ = _sockaddrCommon + [
("sin_port", c_ushort),
("sin_flowinfo", c_uint32),
("sin_addr", in6_addr),
]
class ifaddrs(Structure):
pass
ifaddrs_p = POINTER(ifaddrs)
ifaddrs._fields_ = [
('ifa_next', ifaddrs_p),
('ifa_name', c_char_p),
('ifa_flags', c_uint32),
('ifa_addr', POINTER(sockaddr)),
('ifa_netmask', POINTER(sockaddr)),
('ifa_dstaddr', POINTER(sockaddr)),
('ifa_data', c_void_p)]
getifaddrs = libc.getifaddrs
getifaddrs.argtypes = [POINTER(ifaddrs_p)]
getifaddrs.restype = c_int
freeifaddrs = libc.freeifaddrs
freeifaddrs.argtypes = [ifaddrs_p]
def _maybeCleanupScopeIndex(family, packed):
"""
On FreeBSD, kill the embedded interface indices in link-local scoped
addresses.
@param family: The address family of the packed address - one of the
I{socket.AF_*} constants.
@param packed: The packed representation of the address (ie, the bytes of a
I{in_addr} field).
@type packed: L{bytes}
@return: The packed address with any FreeBSD-specific extra bits cleared.
@rtype: L{bytes}
@see: U{https://twistedmatrix.com/trac/ticket/6843}
@see: U{http://www.freebsd.org/doc/en/books/developers-handbook/ipv6.html#ipv6-scope-index}
@note: Indications are that the need for this will be gone in FreeBSD >=10.
"""
if sys.platform.startswith('freebsd') and packed[:2] == b"\xfe\x80":
return packed[:2] + b"\x00\x00" + packed[4:]
return packed
def _interfaces():
"""
Call C{getifaddrs(3)} and return a list of tuples of interface name, address
family, and human-readable address representing its results.
"""
ifaddrs = ifaddrs_p()
if getifaddrs(pointer(ifaddrs)) < 0:
raise OSError()
results = []
try:
while ifaddrs:
if ifaddrs[0].ifa_addr:
family = ifaddrs[0].ifa_addr[0].sin_family
if family == AF_INET:
addr = cast(ifaddrs[0].ifa_addr, POINTER(sockaddr_in))
elif family == AF_INET6:
addr = cast(ifaddrs[0].ifa_addr, POINTER(sockaddr_in6))
else:
addr = None
if addr:
packed = b''.join(map(chr, addr[0].sin_addr.in_addr[:]))
packed = _maybeCleanupScopeIndex(family, packed)
results.append((
ifaddrs[0].ifa_name,
family,
inet_ntop(family, packed)))
ifaddrs = ifaddrs[0].ifa_next
finally:
freeifaddrs(ifaddrs)
return results
def posixGetLinkLocalIPv6Addresses():
"""
Return a list of strings in colon-hex format representing all the link local
IPv6 addresses available on the system, as reported by I{getifaddrs(3)}.
"""
retList = []
for (interface, family, address) in _interfaces():
interface = nativeString(interface)
address = nativeString(address)
if family == socket.AF_INET6 and address.startswith('fe80:'):
retList.append('%s%%%s' % (address, interface))
return retList
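A hypothetical call to the helper defined above; the private import path is an assumption based on upstream Twisted, and the output depends on the host's interfaces:
from twisted.internet._posixifaces import posixGetLinkLocalIPv6Addresses  # assumed path

for addr in posixGetLinkLocalIPv6Addresses():
    # Each entry is colon-hex plus "%" and the interface name, e.g. "fe80::1%lo0".
    print(addr)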

View file

@@ -0,0 +1,120 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows implementation of local network interface enumeration.
"""
from socket import socket, AF_INET6, SOCK_STREAM
from ctypes import (
WinDLL, byref, create_string_buffer, create_unicode_buffer,
c_int, c_void_p,
POINTER, Structure, cast, wstring_at)
WS2_32 = WinDLL('ws2_32')
SOCKET = c_int
DWORD = c_int
LPVOID = c_void_p
LPSOCKADDR = c_void_p
LPWSAPROTOCOL_INFO = c_void_p
LPTSTR = c_void_p
LPDWORD = c_void_p
LPWSAOVERLAPPED = c_void_p
LPWSAOVERLAPPED_COMPLETION_ROUTINE = c_void_p
# http://msdn.microsoft.com/en-us/library/ms741621(v=VS.85).aspx
# int WSAIoctl(
# __in SOCKET s,
# __in DWORD dwIoControlCode,
# __in LPVOID lpvInBuffer,
# __in DWORD cbInBuffer,
# __out LPVOID lpvOutBuffer,
# __in DWORD cbOutBuffer,
# __out LPDWORD lpcbBytesReturned,
# __in LPWSAOVERLAPPED lpOverlapped,
# __in LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
# );
WSAIoctl = WS2_32.WSAIoctl
WSAIoctl.argtypes = [
SOCKET, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD,
LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE]
WSAIoctl.restype = c_int
# http://msdn.microsoft.com/en-us/library/ms741516(VS.85).aspx
# INT WSAAPI WSAAddressToString(
# __in LPSOCKADDR lpsaAddress,
# __in DWORD dwAddressLength,
# __in_opt LPWSAPROTOCOL_INFO lpProtocolInfo,
# __inout LPTSTR lpszAddressString,
# __inout LPDWORD lpdwAddressStringLength
# );
WSAAddressToString = WS2_32.WSAAddressToStringW
WSAAddressToString.argtypes = [
LPSOCKADDR, DWORD, LPWSAPROTOCOL_INFO, LPTSTR, LPDWORD]
WSAAddressToString.restype = c_int
SIO_ADDRESS_LIST_QUERY = 0x48000016
WSAEFAULT = 10014
class SOCKET_ADDRESS(Structure):
_fields_ = [('lpSockaddr', c_void_p),
('iSockaddrLength', c_int)]
def make_SAL(ln):
class SOCKET_ADDRESS_LIST(Structure):
_fields_ = [('iAddressCount', c_int),
('Address', SOCKET_ADDRESS * ln)]
return SOCKET_ADDRESS_LIST
def win32GetLinkLocalIPv6Addresses():
"""
Return a list of strings in colon-hex format representing all the link local
IPv6 addresses available on the system, as reported by
I{WSAIoctl}/C{SIO_ADDRESS_LIST_QUERY}.
"""
s = socket(AF_INET6, SOCK_STREAM)
size = 4096
retBytes = c_int()
for i in range(2):
buf = create_string_buffer(size)
ret = WSAIoctl(
s.fileno(),
SIO_ADDRESS_LIST_QUERY, 0, 0, buf, size, byref(retBytes), 0, 0)
# WSAIoctl might fail with WSAEFAULT, which means there was not enough
# space in the buffer we gave it. There's no way to check the errno
# until Python 2.6, so we don't even try. :/ Maybe if retBytes is still
# 0 another error happened, though.
if ret and retBytes.value:
size = retBytes.value
else:
break
# If it failed, then we'll just have to give up. Still no way to see why.
if ret:
raise RuntimeError("WSAIoctl failure")
addrList = cast(buf, POINTER(make_SAL(0)))
addrCount = addrList[0].iAddressCount
addrList = cast(buf, POINTER(make_SAL(addrCount)))
addressStringBufLength = 1024
addressStringBuf = create_unicode_buffer(addressStringBufLength)
retList = []
for i in range(addrList[0].iAddressCount):
retBytes.value = addressStringBufLength
address = addrList[0].Address[i]
ret = WSAAddressToString(
address.lpSockaddr, address.iSockaddrLength, 0, addressStringBuf,
byref(retBytes))
if ret:
raise RuntimeError("WSAAddressToString failure")
retList.append(wstring_at(addressStringBuf))
return [addr for addr in retList if '%' in addr]
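The make_SAL helper above relies on a general ctypes idiom: generate a structure type per array length, read the count via a zero-length cast, then re-cast the same buffer with the real length. A minimal, platform-independent sketch of that idiom (unrelated to the Winsock calls themselves):
from ctypes import Structure, c_int, sizeof

class PAIR(Structure):
    _fields_ = [("a", c_int), ("b", c_int)]

def make_pair_list(n):
    # A new structure type per length, just as make_SAL does for SOCKET_ADDRESS.
    class PAIR_LIST(Structure):
        _fields_ = [("count", c_int), ("items", PAIR * n)]
    return PAIR_LIST

print(sizeof(make_pair_list(0)), sizeof(make_pair_list(3)))  # the array field grows with n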

View file

@@ -0,0 +1,167 @@
# -*- test-case-name: twisted.internet.test.test_coroutines -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for C{yield from} support in Deferreds.
These tests can only work and be imported on Python 3!
"""
import types
from twisted.internet.defer import Deferred, ensureDeferred, fail
from twisted.trial.unittest import TestCase
from twisted.internet.task import Clock
class YieldFromTests(TestCase):
"""
Tests for using Deferreds in conjunction with PEP-380.
"""
def test_ensureDeferred(self):
"""
L{ensureDeferred} will turn a coroutine into a L{Deferred}.
"""
def run():
d = Deferred()
d.callback("bar")
yield from d
res = yield from run2()
return res
def run2():
d = Deferred()
d.callback("foo")
res = yield from d
return res
# It's a generator...
r = run()
self.assertIsInstance(r, types.GeneratorType)
# Now it's a Deferred.
d = ensureDeferred(r)
self.assertIsInstance(d, Deferred)
# The Deferred has the result we want.
res = self.successResultOf(d)
self.assertEqual(res, "foo")
def test_basic(self):
"""
L{ensureDeferred} allows a function to C{yield from} a L{Deferred}.
"""
def run():
d = Deferred()
d.callback("foo")
res = yield from d
return res
d = ensureDeferred(run())
res = self.successResultOf(d)
self.assertEqual(res, "foo")
def test_exception(self):
"""
An exception in a generator wrapped with L{ensureDeferred} will cause
the returned L{Deferred} to fire with a failure.
"""
def run():
d = Deferred()
d.callback("foo")
yield from d
raise ValueError("Oh no!")
d = ensureDeferred(run())
res = self.failureResultOf(d)
self.assertEqual(type(res.value), ValueError)
self.assertEqual(res.value.args, ("Oh no!",))
def test_twoDeep(self):
"""
A generator wrapped with L{ensureDeferred} that yields from a L{Deferred}
suspends its execution until the inner L{Deferred} fires.
"""
reactor = Clock()
sections = []
def runone():
sections.append(2)
d = Deferred()
reactor.callLater(1, d.callback, None)
yield from d
sections.append(3)
return "Yay!"
def run():
sections.append(1)
result = yield from runone()
sections.append(4)
d = Deferred()
reactor.callLater(1, d.callback, None)
yield from d
sections.append(5)
return result
d = ensureDeferred(run())
reactor.advance(0.9)
self.assertEqual(sections, [1, 2])
reactor.advance(0.1)
self.assertEqual(sections, [1, 2, 3, 4])
reactor.advance(0.9)
self.assertEqual(sections, [1, 2, 3, 4])
reactor.advance(0.1)
self.assertEqual(sections, [1, 2, 3, 4, 5])
res = self.successResultOf(d)
self.assertEqual(res, "Yay!")
def test_reraise(self):
"""
Yielding from an already failed Deferred will raise the exception.
"""
def test():
try:
yield from fail(ValueError("Boom"))
except ValueError as e:
self.assertEqual(e.args, ("Boom",))
return 1
return 0
res = self.successResultOf(ensureDeferred(test()))
self.assertEqual(res, 1)
def test_chained(self):
"""
Yielding from a paused & chained Deferred will give the result when it
has one.
"""
reactor = Clock()
def test():
d = Deferred()
d2 = Deferred()
d.addCallback(lambda ignored: d2)
d.callback(None)
reactor.callLater(0, d2.callback, "bye")
res = yield from d
return res
d = ensureDeferred(test())
reactor.advance(0.1)
res = self.successResultOf(d)
self.assertEqual(res, "bye")
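The same wrapping works with PEP-380 generators, which is what these tests cover; a brief sketch assuming the vendored ensureDeferred accepts plain generators as well as coroutines, as the tests above exercise:
from twisted.internet.defer import Deferred, ensureDeferred

def greet():
    d = Deferred()
    d.callback("hi")
    result = yield from d   # suspends until the Deferred has a result
    return result

ensureDeferred(greet()).addCallback(print)   # prints "hi"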

View file

@@ -0,0 +1,612 @@
# -*- test-case-name: twisted.internet.test.test_tcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various helpers for tests for connection-oriented transports.
"""
from __future__ import division, absolute_import
import socket
from gc import collect
from weakref import ref
from zope.interface.verify import verifyObject
from twisted.python import context, log
from twisted.python.failure import Failure
from twisted.python.runtime import platform
from twisted.python.log import ILogContext, msg, err
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet.interfaces import IConnector, IReactorFDSet
from twisted.internet.protocol import ClientFactory, Protocol, ServerFactory
from twisted.trial.unittest import SkipTest
from twisted.internet.test.reactormixins import needsRunningReactor
from twisted.test.test_tcp import ClosingProtocol
def findFreePort(interface='127.0.0.1', family=socket.AF_INET,
type=socket.SOCK_STREAM):
"""
Ask the platform to allocate a free port on the specified interface, then
release the socket and return the address which was allocated.
@param interface: The local address to try to bind the port on.
@type interface: C{str}
@param type: The socket type which will use the resulting port.
@return: A two-tuple of address and port, like that returned by
L{socket.getsockname}.
"""
addr = socket.getaddrinfo(interface, 0)[0][4]
probe = socket.socket(family, type)
try:
probe.bind(addr)
if family == socket.AF_INET6:
sockname = probe.getsockname()
hostname = socket.getnameinfo(
sockname, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)[0]
return (hostname, sockname[1])
else:
return probe.getsockname()
finally:
probe.close()
class ConnectableProtocol(Protocol):
"""
A protocol to be used with L{runProtocolsWithReactor}.
The protocol and its pair should eventually disconnect from each other.
@ivar reactor: The reactor used in this test.
@ivar disconnectReason: The L{Failure} passed to C{connectionLost}.
@ivar _done: A L{Deferred} which will be fired when the connection is
lost.
"""
disconnectReason = None
def _setAttributes(self, reactor, done):
"""
Set attributes on the protocol that are known only externally; this
will be called by L{runProtocolsWithReactor} when this protocol is
instantiated.
@param reactor: The reactor used in this test.
@param done: A L{Deferred} which will be fired when the connection is
lost.
"""
self.reactor = reactor
self._done = done
def connectionLost(self, reason):
self.disconnectReason = reason
self._done.callback(None)
del self._done
class EndpointCreator(object):
"""
Create client and server endpoints that know how to connect to each other.
"""
def server(self, reactor):
"""
Return an object providing C{IStreamServerEndpoint} for use in creating
a server to use to establish the connection type to be tested.
"""
raise NotImplementedError()
def client(self, reactor, serverAddress):
"""
Return an object providing C{IStreamClientEndpoint} for use in creating
a client to use to establish the connection type to be tested.
"""
raise NotImplementedError()
class _SingleProtocolFactory(ClientFactory):
"""
Factory to be used by L{runProtocolsWithReactor}.
It always returns the same protocol (i.e. is intended for only a single
connection).
"""
def __init__(self, protocol):
self._protocol = protocol
def buildProtocol(self, addr):
return self._protocol
def runProtocolsWithReactor(reactorBuilder, serverProtocol, clientProtocol,
endpointCreator):
"""
Connect two protocols using endpoints and a new reactor instance.
A new reactor will be created and run, with the client and server protocol
instances connected to each other using the given endpoint creator. The
protocols should run through some set of tests, then disconnect; when both
have disconnected the reactor will be stopped and the function will
return.
@param reactorBuilder: A L{ReactorBuilder} instance.
@param serverProtocol: A L{ConnectableProtocol} that will be the server.
@param clientProtocol: A L{ConnectableProtocol} that will be the client.
@param endpointCreator: An instance of L{EndpointCreator}.
@return: The reactor run by this test.
"""
reactor = reactorBuilder.buildReactor()
serverProtocol._setAttributes(reactor, Deferred())
clientProtocol._setAttributes(reactor, Deferred())
serverFactory = _SingleProtocolFactory(serverProtocol)
clientFactory = _SingleProtocolFactory(clientProtocol)
# Listen on a port:
serverEndpoint = endpointCreator.server(reactor)
d = serverEndpoint.listen(serverFactory)
# Connect to the port:
def gotPort(p):
clientEndpoint = endpointCreator.client(
reactor, p.getHost())
return clientEndpoint.connect(clientFactory)
d.addCallback(gotPort)
# Stop reactor when both connections are lost:
def failed(result):
log.err(result, "Connection setup failed.")
disconnected = gatherResults([serverProtocol._done, clientProtocol._done])
d.addCallback(lambda _: disconnected)
d.addErrback(failed)
d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop))
reactorBuilder.runReactor(reactor)
return reactor
def _getWriters(reactor):
"""
Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as
well.
"""
if IReactorFDSet.providedBy(reactor):
return reactor.getWriters()
elif 'IOCP' in reactor.__class__.__name__:
return reactor.handles
else:
# Cannot tell what is going on.
raise Exception("Cannot find writers on %r" % (reactor,))
class _AcceptOneClient(ServerFactory):
"""
This factory fires a L{Deferred} with a protocol instance shortly after it
is constructed (hopefully long enough afterwards so that it has been
connected to a transport).
@ivar reactor: The reactor used to schedule the I{shortly}.
@ivar result: A L{Deferred} which will be fired with the protocol instance.
"""
def __init__(self, reactor, result):
self.reactor = reactor
self.result = result
def buildProtocol(self, addr):
protocol = ServerFactory.buildProtocol(self, addr)
self.reactor.callLater(0, self.result.callback, protocol)
return protocol
class _SimplePullProducer(object):
"""
A pull producer which writes one byte whenever it is resumed. For use by
C{test_unregisterProducerAfterDisconnect}.
"""
def __init__(self, consumer):
self.consumer = consumer
def stopProducing(self):
pass
def resumeProducing(self):
log.msg("Producer.resumeProducing")
self.consumer.write(b'x')
class Stop(ClientFactory):
"""
A client factory which stops a reactor when a connection attempt fails.
"""
failReason = None
def __init__(self, reactor):
self.reactor = reactor
def clientConnectionFailed(self, connector, reason):
self.failReason = reason
msg("Stop(CF) cCFailed: %s" % (reason.getErrorMessage(),))
self.reactor.stop()
class ClosingLaterProtocol(ConnectableProtocol):
"""
ClosingLaterProtocol exchanges one byte with its peer and then disconnects
itself. This is mostly a work-around for the fact that connectionMade is
called before the SSL handshake has completed.
"""
def __init__(self, onConnectionLost):
self.lostConnectionReason = None
self.onConnectionLost = onConnectionLost
def connectionMade(self):
msg("ClosingLaterProtocol.connectionMade")
def dataReceived(self, bytes):
msg("ClosingLaterProtocol.dataReceived %r" % (bytes,))
self.transport.loseConnection()
def connectionLost(self, reason):
msg("ClosingLaterProtocol.connectionLost")
self.lostConnectionReason = reason
self.onConnectionLost.callback(self)
class ConnectionTestsMixin(object):
"""
This mixin defines test methods which should apply to most L{ITransport}
implementations.
"""
# This should be a reactormixins.EndpointCreator instance.
endpoints = None
def test_logPrefix(self):
"""
Client and server transports implement L{ILoggingContext.logPrefix} to
return a message reflecting the protocol they are running.
"""
class CustomLogPrefixProtocol(ConnectableProtocol):
def __init__(self, prefix):
self._prefix = prefix
self.system = None
def connectionMade(self):
self.transport.write(b"a")
def logPrefix(self):
return self._prefix
def dataReceived(self, bytes):
self.system = context.get(ILogContext)["system"]
self.transport.write(b"b")
# Only close connection if both sides have received data, so
# that both sides have system set.
if b"b" in bytes:
self.transport.loseConnection()
client = CustomLogPrefixProtocol("Custom Client")
server = CustomLogPrefixProtocol("Custom Server")
runProtocolsWithReactor(self, server, client, self.endpoints)
self.assertIn("Custom Client", client.system)
self.assertIn("Custom Server", server.system)
def test_writeAfterDisconnect(self):
"""
After a connection is disconnected, L{ITransport.write} and
L{ITransport.writeSequence} are no-ops.
"""
reactor = self.buildReactor()
finished = []
serverConnectionLostDeferred = Deferred()
protocol = lambda: ClosingLaterProtocol(serverConnectionLostDeferred)
portDeferred = self.endpoints.server(reactor).listen(
ServerFactory.forProtocol(protocol))
def listening(port):
msg("Listening on %r" % (port.getHost(),))
endpoint = self.endpoints.client(reactor, port.getHost())
lostConnectionDeferred = Deferred()
protocol = lambda: ClosingLaterProtocol(lostConnectionDeferred)
client = endpoint.connect(ClientFactory.forProtocol(protocol))
def write(proto):
msg("About to write to %r" % (proto,))
proto.transport.write(b'x')
client.addCallbacks(write, lostConnectionDeferred.errback)
def disconnected(proto):
msg("%r disconnected" % (proto,))
proto.transport.write(b"some bytes to get lost")
proto.transport.writeSequence([b"some", b"more"])
finished.append(True)
lostConnectionDeferred.addCallback(disconnected)
serverConnectionLostDeferred.addCallback(disconnected)
return gatherResults([lostConnectionDeferred,
serverConnectionLostDeferred])
def onListen():
portDeferred.addCallback(listening)
portDeferred.addErrback(err)
portDeferred.addCallback(lambda ignored: reactor.stop())
needsRunningReactor(reactor, onListen)
self.runReactor(reactor)
self.assertEqual(finished, [True, True])
def test_protocolGarbageAfterLostConnection(self):
"""
After the connection a protocol is being used for is closed, the
reactor discards all of its references to the protocol.
"""
lostConnectionDeferred = Deferred()
clientProtocol = ClosingLaterProtocol(lostConnectionDeferred)
clientRef = ref(clientProtocol)
reactor = self.buildReactor()
portDeferred = self.endpoints.server(reactor).listen(
ServerFactory.forProtocol(Protocol))
def listening(port):
msg("Listening on %r" % (port.getHost(),))
endpoint = self.endpoints.client(reactor, port.getHost())
client = endpoint.connect(
ClientFactory.forProtocol(lambda: clientProtocol))
def disconnect(proto):
msg("About to disconnect %r" % (proto,))
proto.transport.loseConnection()
client.addCallback(disconnect)
client.addErrback(lostConnectionDeferred.errback)
return lostConnectionDeferred
def onListening():
portDeferred.addCallback(listening)
portDeferred.addErrback(err)
portDeferred.addBoth(lambda ignored: reactor.stop())
needsRunningReactor(reactor, onListening)
self.runReactor(reactor)
# Drop the reference and get the garbage collector to tell us if there
# are no references to the protocol instance left in the reactor.
clientProtocol = None
collect()
self.assertIsNone(clientRef())
class LogObserverMixin(object):
"""
Mixin for L{TestCase} subclasses which want to observe log events.
"""
def observe(self):
loggedMessages = []
log.addObserver(loggedMessages.append)
self.addCleanup(log.removeObserver, loggedMessages.append)
return loggedMessages
class BrokenContextFactory(object):
"""
A context factory with a broken C{getContext} method, for exercising the
error handling for such a case.
"""
message = "Some path was wrong maybe"
def getContext(self):
raise ValueError(self.message)
class StreamClientTestsMixin(object):
"""
This mixin defines tests applicable to SOCK_STREAM client implementations.
This must be mixed in to a L{ReactorBuilder
<twisted.internet.test.reactormixins.ReactorBuilder>} subclass, as it
depends on several of its methods.
The methods C{connect} and C{listen} must then be defined, providing a client
and a server that communicate with each other.
"""
def test_interface(self):
"""
The C{connect} method returns an object providing L{IConnector}.
"""
reactor = self.buildReactor()
connector = self.connect(reactor, ClientFactory())
self.assertTrue(verifyObject(IConnector, connector))
def test_clientConnectionFailedStopsReactor(self):
"""
The reactor can be stopped by a client factory's
C{clientConnectionFailed} method.
"""
reactor = self.buildReactor()
needsRunningReactor(
reactor, lambda: self.connect(reactor, Stop(reactor)))
self.runReactor(reactor)
def test_connectEvent(self):
"""
This test checks that we correctly get notification events for a
client. This ought to prevent a regression under Windows using the
GTK2 reactor. See #3925.
"""
reactor = self.buildReactor()
self.listen(reactor, ServerFactory.forProtocol(Protocol))
connected = []
class CheckConnection(Protocol):
def connectionMade(self):
connected.append(self)
reactor.stop()
clientFactory = Stop(reactor)
clientFactory.protocol = CheckConnection
needsRunningReactor(
reactor, lambda: self.connect(reactor, clientFactory))
reactor.run()
self.assertTrue(connected)
def test_unregisterProducerAfterDisconnect(self):
"""
If a producer is unregistered from a transport after the transport has
been disconnected (by the peer) and after C{loseConnection} has been
called, the transport is not re-added to the reactor as a writer as
would be necessary if the transport were still connected.
"""
reactor = self.buildReactor()
self.listen(reactor, ServerFactory.forProtocol(ClosingProtocol))
finished = Deferred()
finished.addErrback(log.err)
finished.addCallback(lambda ign: reactor.stop())
writing = []
class ClientProtocol(Protocol):
"""
Protocol to connect, register a producer, try to lose the
connection, wait for the server to disconnect from us, and then
unregister the producer.
"""
def connectionMade(self):
log.msg("ClientProtocol.connectionMade")
self.transport.registerProducer(
_SimplePullProducer(self.transport), False)
self.transport.loseConnection()
def connectionLost(self, reason):
log.msg("ClientProtocol.connectionLost")
self.unregister()
writing.append(self.transport in _getWriters(reactor))
finished.callback(None)
def unregister(self):
log.msg("ClientProtocol unregister")
self.transport.unregisterProducer()
clientFactory = ClientFactory()
clientFactory.protocol = ClientProtocol
self.connect(reactor, clientFactory)
self.runReactor(reactor)
self.assertFalse(writing[0],
"Transport was writing after unregisterProducer.")
def test_disconnectWhileProducing(self):
"""
If C{loseConnection} is called while a producer is registered with the
transport, the connection is closed after the producer is unregistered.
"""
reactor = self.buildReactor()
# For some reason, pygobject/pygtk will not deliver the close
# notification that should happen after the unregisterProducer call in
# this test. The selectable is in the write notification set, but no
# notification ever arrives. Probably for the same reason #5233 led
# win32eventreactor to be broken.
skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
reactorClassName = reactor.__class__.__name__
if reactorClassName in skippedReactors and platform.isWindows():
raise SkipTest(
"A pygobject/pygtk bug disables this functionality "
"on Windows.")
class Producer:
def resumeProducing(self):
log.msg("Producer.resumeProducing")
self.listen(reactor, ServerFactory.forProtocol(Protocol))
finished = Deferred()
finished.addErrback(log.err)
finished.addCallback(lambda ign: reactor.stop())
class ClientProtocol(Protocol):
"""
Protocol to connect, register a producer, try to lose the
connection, unregister the producer, and wait for the connection to
actually be lost.
"""
def connectionMade(self):
log.msg("ClientProtocol.connectionMade")
self.transport.registerProducer(Producer(), False)
self.transport.loseConnection()
# Let the reactor tick over, in case synchronously calling
# loseConnection and then unregisterProducer is the same as
# synchronously calling unregisterProducer and then
# loseConnection (as it is in several reactors).
reactor.callLater(0, reactor.callLater, 0, self.unregister)
def unregister(self):
log.msg("ClientProtocol unregister")
self.transport.unregisterProducer()
# This should all be pretty quick. Fail the test
# if we don't get a connectionLost event really
# soon.
reactor.callLater(
1.0, finished.errback,
Failure(Exception("Connection was not lost")))
def connectionLost(self, reason):
log.msg("ClientProtocol.connectionLost")
finished.callback(None)
clientFactory = ClientFactory()
clientFactory.protocol = ClientProtocol
self.connect(reactor, clientFactory)
self.runReactor(reactor)
# If the test failed, we logged an error already and trial
# will catch it.
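The findFreePort helper at the top of this file uses the usual bind-to-port-0 idiom; a self-contained sketch of that idiom, independent of Twisted:
import socket

# Bind to port 0 so the OS picks a free port, read it back, then release the socket.
probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    probe.bind(("127.0.0.1", 0))
    host, port = probe.getsockname()
finally:
    probe.close()
print("allocated", host, port)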

View file

@@ -0,0 +1,37 @@
This is a concatenation of thing1.pem and thing2.pem.
-----BEGIN CERTIFICATE-----
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
YS0xLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
dyBZb3JrMB4XDTEwMDkyMTAxMjUxNFoXDTExMDkyMTAxMjUxNFowgagxETAPBgNV
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
VQQDExVmYWtlLWNhLTEuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALRb
VqC0CsaFgq1vbwPfs8zoP3ZYC/0sPMv0RJN+f3Dc7Q6YgNHS7o7TM3uAy/McADeW
rwVuNJGe9k+4ZBHysmBH1sG64fHT5TlK9saPcUQqkubSWj4cKSDtVbQERWqC5Dy+
qTQeZGYoPEMlnRXgMpST04DG//Dgzi4PYqUOjwxTAgMBAAEwDQYJKoZIhvcNAQEE
BQADgYEAqNEdMXWEs8Co76wxL3/cSV3MjiAroVxJdI/3EzlnfPi1JeibbdWw31fC
bn6428KTjjfhS31zo1yHG3YNXFEJXRscwLAH7ogz5kJwZMy/oS/96EFM10bkNwkK
v+nWKN8i3t/E5TEIl3BPN8tchtWmH0rycVuzs5LwaewwR1AnUE4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
-----END CERTIFICATE-----

View file

@@ -0,0 +1 @@
This file is not a certificate; it is present to make sure that it will be skipped.

View file

@@ -0,0 +1,26 @@
This is a self-signed certificate authority certificate to be used in tests.
It was created with the following command:
certcreate -f thing1.pem -h fake-ca-1.example.com -e noreply@example.com \
-S 1234 -o 'Twisted Matrix Labs'
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
-----BEGIN CERTIFICATE-----
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
YS0xLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
dyBZb3JrMB4XDTEwMDkyMTAxMjUxNFoXDTExMDkyMTAxMjUxNFowgagxETAPBgNV
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
VQQDExVmYWtlLWNhLTEuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALRb
VqC0CsaFgq1vbwPfs8zoP3ZYC/0sPMv0RJN+f3Dc7Q6YgNHS7o7TM3uAy/McADeW
rwVuNJGe9k+4ZBHysmBH1sG64fHT5TlK9saPcUQqkubSWj4cKSDtVbQERWqC5Dy+
qTQeZGYoPEMlnRXgMpST04DG//Dgzi4PYqUOjwxTAgMBAAEwDQYJKoZIhvcNAQEE
BQADgYEAqNEdMXWEs8Co76wxL3/cSV3MjiAroVxJdI/3EzlnfPi1JeibbdWw31fC
bn6428KTjjfhS31zo1yHG3YNXFEJXRscwLAH7ogz5kJwZMy/oS/96EFM10bkNwkK
v+nWKN8i3t/E5TEIl3BPN8tchtWmH0rycVuzs5LwaewwR1AnUE4=
-----END CERTIFICATE-----
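If a test needs to inspect one of these certificates, Twisted's ssl wrapper can load the PEM text directly; a sketch assuming pyOpenSSL is available and using a hypothetical file path:
from twisted.internet.ssl import Certificate

with open("thing1.pem", "rb") as f:   # hypothetical path to the file above
    cert = Certificate.loadPEM(f.read())
print(cert.getSubject())              # subject of fake-ca-1.example.com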

View file

@@ -0,0 +1,26 @@
This is a self-signed certificate authority certificate to be used in tests.
It was created with the following command:
certcreate -f thing2.pem -h fake-ca-2.example.com -e noreply@example.com \
-S 1234 -o 'Twisted Matrix Labs'
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
-----BEGIN CERTIFICATE-----
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
-----END CERTIFICATE-----

View file

@@ -0,0 +1,26 @@
This is a self-signed certificate authority certificate to be used in tests.
It was created with the following command:
certcreate -f thing2.pem -h fake-ca-2.example.com -e noreply@example.com \
-S 1234 -o 'Twisted Matrix Labs'
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
-----BEGIN CERTIFICATE-----
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
-----END CERTIFICATE-----

View file

@@ -0,0 +1,67 @@
# -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Fake client and server endpoint string parser plugins for testing purposes.
"""
from __future__ import absolute_import, division
from zope.interface.declarations import implementer
from twisted.plugin import IPlugin
from twisted.internet.interfaces import (
IStreamClientEndpoint, IStreamServerEndpoint,
IStreamServerEndpointStringParser,
IStreamClientEndpointStringParserWithReactor)
@implementer(IPlugin)
class PluginBase(object):
def __init__(self, pfx):
self.prefix = pfx
@implementer(IStreamClientEndpointStringParserWithReactor)
class FakeClientParserWithReactor(PluginBase):
def parseStreamClient(self, *a, **kw):
return StreamClient(self, a, kw)
@implementer(IStreamServerEndpointStringParser)
class FakeParser(PluginBase):
def parseStreamServer(self, *a, **kw):
return StreamServer(self, a, kw)
class EndpointBase(object):
def __init__(self, parser, args, kwargs):
self.parser = parser
self.args = args
self.kwargs = kwargs
@implementer(IStreamClientEndpoint)
class StreamClient(EndpointBase):
pass
@implementer(IStreamServerEndpoint)
class StreamServer(EndpointBase):
pass
# Instantiate plugin interface providers to register them.
fake = FakeParser('fake')
fakeClientWithReactor = FakeClientParserWithReactor('crfake')
fakeClientWithReactorAndPreference = FakeClientParserWithReactor('cpfake')

View file

@@ -0,0 +1,67 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Testing helpers related to the module system.
"""
from __future__ import division, absolute_import
__all__ = ['NoReactor', 'AlternateReactor']
import sys
import twisted.internet
from twisted.test.test_twisted import SetAsideModule
class NoReactor(SetAsideModule):
"""
Context manager that uninstalls the reactor, if any, and then restores it
afterwards.
"""
def __init__(self):
SetAsideModule.__init__(self, "twisted.internet.reactor")
def __enter__(self):
SetAsideModule.__enter__(self)
if "twisted.internet.reactor" in self.modules:
del twisted.internet.reactor
def __exit__(self, excType, excValue, traceback):
SetAsideModule.__exit__(self, excType, excValue, traceback)
# Clean up 'reactor' attribute that may have been set on
# twisted.internet:
reactor = self.modules.get("twisted.internet.reactor", None)
if reactor is not None:
twisted.internet.reactor = reactor
else:
try:
del twisted.internet.reactor
except AttributeError:
pass
class AlternateReactor(NoReactor):
"""
A context manager which temporarily installs a different object as the
global reactor.
"""
def __init__(self, reactor):
"""
@param reactor: Any object to install as the global reactor.
"""
NoReactor.__init__(self)
self.alternate = reactor
def __enter__(self):
NoReactor.__enter__(self)
twisted.internet.reactor = self.alternate
sys.modules['twisted.internet.reactor'] = self.alternate
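A hypothetical use of AlternateReactor defined above: install a task.Clock as the global reactor for the duration of a with block and let __exit__ restore whatever was there before; the import path mirrors upstream Twisted and is an assumption here:
from twisted.internet.task import Clock
from twisted.internet.test.modulehelpers import AlternateReactor  # assumed path

with AlternateReactor(Clock()):
    from twisted.internet import reactor          # resolves to the Clock while inside
    reactor.callLater(0, print, "scheduled on the substitute reactor")
# the previously installed reactor, if any, is restored on exit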

View file

@@ -0,0 +1,27 @@
from __future__ import absolute_import, division
import sys
import os
try:
# On Windows, stdout is not opened in binary mode by default,
# so newline characters are munged on writing, interfering with
# the tests.
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
except ImportError:
pass
# Loop over each of the arguments given and print it to stdout
for arg in sys.argv[1:]:
res = arg + chr(0)
if sys.version_info < (3, 0):
stdout = sys.stdout
else:
stdout = sys.stdout.buffer
res = res.encode(sys.getfilesystemencoding(), "surrogateescape")
stdout.write(res)
stdout.flush()

View file

@@ -0,0 +1,6 @@
import os, sys
while 1:
line = sys.stdin.readline().strip()
if not line:
break
os.close(int(line))

View file

@@ -0,0 +1,21 @@
import sys
# Override theSystemPath so it throws KeyError on gi.pygtkcompat:
from twisted.python import modules
modules.theSystemPath = modules.PythonPath([], moduleDict={})
# Now, when we import gireactor it shouldn't use pygtkcompat, and should
# instead prevent gobject from being importable:
from twisted.internet import gireactor
for name in gireactor._PYGTK_MODULES:
if sys.modules[name] is not None:
sys.stdout.write("failure, sys.modules[%r] is %r, instead of None" %
(name, sys.modules["gobject"]))
sys.exit(0)
try:
import gobject
except ImportError:
sys.stdout.write("success")
else:
sys.stdout.write("failure: %s was imported" % (gobject.__path__,))

View file

@@ -0,0 +1,33 @@
# A program which exits after starting a child which inherits its
# stdin/stdout/stderr and keeps them open until stdin is closed.
import sys, os
def grandchild():
sys.stdout.write('grandchild started')
sys.stdout.flush()
sys.stdin.read()
def main():
if sys.argv[1] == 'child':
if sys.argv[2] == 'windows':
import win32api as api, win32process as proc
info = proc.STARTUPINFO()
info.hStdInput = api.GetStdHandle(api.STD_INPUT_HANDLE)
info.hStdOutput = api.GetStdHandle(api.STD_OUTPUT_HANDLE)
info.hStdError = api.GetStdHandle(api.STD_ERROR_HANDLE)
python = sys.executable
scriptDir = os.path.dirname(__file__)
scriptName = os.path.basename(__file__)
proc.CreateProcess(
None, " ".join((python, scriptName, "grandchild")), None,
None, 1, 0, os.environ, scriptDir, info)
else:
if os.fork() == 0:
grandchild()
else:
grandchild()
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,350 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for unit testing reactor implementations.
The main feature of this module is L{ReactorBuilder}, a base class for use when
writing interface/blackbox tests for reactor implementations. Test case classes
for reactor features should subclass L{ReactorBuilder} instead of
L{SynchronousTestCase}. All of the features of L{SynchronousTestCase} will be
available. Additionally, the tests will automatically be applied to all
available reactor implementations.
"""
from __future__ import division, absolute_import
__metaclass__ = type
__all__ = ['TestTimeoutError', 'ReactorBuilder', 'needsRunningReactor']
import os, signal, time
from twisted.trial.unittest import SynchronousTestCase, SkipTest
from twisted.trial.util import DEFAULT_TIMEOUT_DURATION, acquireAttribute
from twisted.python.runtime import platform
from twisted.python.reflect import namedAny
from twisted.python.deprecate import _fullyQualifiedName as fullyQualifiedName
from twisted.python import log
from twisted.python.failure import Failure
from twisted.python.compat import _PY3
# Access private APIs.
if platform.isWindows():
process = None
else:
from twisted.internet import process
class TestTimeoutError(Exception):
"""
The reactor was still running after the timeout period elapsed in
L{ReactorBuilder.runReactor}.
"""
def needsRunningReactor(reactor, thunk):
"""
Various functions within these tests need an already-running reactor at
some point. They need to stop the reactor when the test has completed, and
that means calling reactor.stop(). However, reactor.stop() raises an
exception if the reactor isn't already running, so if the L{Deferred} that
a particular API under test returns fires synchronously (as especially an
endpoint's C{connect()} method may do, if the connect is to a local
interface address) then the test won't be able to stop the reactor being
tested and finish. So this calls C{thunk} only once C{reactor} is running.
(This is just an alias for
L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
reactor parameter, in order to centrally reference the above paragraph rather
than repeating it everywhere as a comment.)
@param reactor: the L{twisted.internet.interfaces.IReactorCore} under test
@param thunk: a 0-argument callable, which eventually finishes the test in
question, probably in a L{Deferred} callback.
"""
reactor.callWhenRunning(thunk)
def stopOnError(case, reactor, publisher=None):
"""
Stop the reactor as soon as any error is logged on the given publisher.
This is beneficial for tests which will wait for a L{Deferred} to fire
before completing (by passing or failing). Certain implementation bugs may
prevent the L{Deferred} from firing with any result at all (consider a
protocol's C{dataReceived} method that raises an exception: this exception
is logged but it won't ever cause a L{Deferred} to fire). In that case the
test would have to complete by timing out which is a much less desirable
outcome than completing as soon as the unexpected error is encountered.
@param case: A L{SynchronousTestCase} to use to clean up the necessary log
observer when the test is over.
@param reactor: The reactor to stop.
@param publisher: A L{LogPublisher} to watch for errors. If L{None}, the
global log publisher will be watched.
"""
if publisher is None:
from twisted.python import log as publisher
running = [None]
def stopIfError(event):
if running and event.get('isError'):
running.pop()
reactor.stop()
publisher.addObserver(stopIfError)
case.addCleanup(publisher.removeObserver, stopIfError)
class ReactorBuilder:
"""
L{SynchronousTestCase} mixin which provides a reactor-creation API. This
mixin defines C{setUp} and C{tearDown}, so mix it in before
L{SynchronousTestCase} or call its methods from the overridden ones in the
subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
provide or these tests will be skipped. The default, L{None}, means
that no interfaces are required.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
L{SynchronousTestCase}s will be created.
"""
_reactors = [
# Select works everywhere
"twisted.internet.selectreactor.SelectReactor",
]
if platform.isWindows():
# PortableGtkReactor is only really interesting on Windows,
# but not really Windows specific; if you want you can
# temporarily move this up to the all-platforms list to test
# it on other platforms. It's not there in general because
# it's not _really_ worth it to support on other platforms,
# since no one really wants to use it on other platforms.
_reactors.extend([
"twisted.internet.gtk2reactor.PortableGtkReactor",
"twisted.internet.gireactor.PortableGIReactor",
"twisted.internet.gtk3reactor.PortableGtk3Reactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor"])
else:
_reactors.extend([
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.gireactor.GIReactor",
"twisted.internet.gtk3reactor.Gtk3Reactor"])
if _PY3:
_reactors.append(
"twisted.internet.asyncioreactor.AsyncioSelectorReactor")
if platform.isMacOSX():
_reactors.append("twisted.internet.cfreactor.CFReactor")
else:
_reactors.extend([
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor"])
if not platform.isLinux():
# Presumably Linux is not going to start supporting kqueue, so
# skip even trying this configuration.
_reactors.extend([
# Support KQueue on non-OS-X POSIX platforms for now.
"twisted.internet.kqreactor.KQueueReactor",
])
reactorFactory = None
originalHandler = None
requiredInterfaces = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if not platform.isWindows():
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
begin = time.time()
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
# The process should exit on its own. However, if it
# doesn't, we're stuck in this loop forever. To avoid
# hanging the test suite, eventually give the process some
# help exiting and move on.
time.sleep(0.001)
if time.time() - begin > 60:
for pid in process.reapProcessHandlers:
os.kill(pid, signal.SIGKILL)
raise Exception(
"Timeout waiting for child processes to exit: %r" % (
process.reapProcessHandlers,))
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker (and any other
# internal readers) should become obsolete when bug #3063 is
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
# error when bug #3063 is fixed, so it should be removed in the same
# branch that fixes it.
#
# -exarkun
reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
reader.connectionLost(None)
reactor._internalReaders.clear()
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except ImportError:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = [
required for required in self.requiredInterfaces
if not required.providedBy(reactor)]
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%s does not provide %s" % (
fullyQualifiedName(reactor.__class__),
",".join([fullyQualifiedName(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def getTimeout(self):
"""
Determine how long to run the test before considering it failed.
@return: A C{int} or C{float} giving a number of seconds.
"""
return acquireAttribute(self._parents, 'timeout', DEFAULT_TIMEOUT_DURATION)
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If L{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TestTimeoutError: If the reactor is still running after
C{timeout} seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
timedOutCall = reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TestTimeoutError(
"reactor still running after %s seconds" % (timeout,))
else:
timedOutCall.cancel()
def makeTestCaseClasses(cls):
"""
Create a L{SynchronousTestCase} subclass which mixes in C{cls} for each
known reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName + "Tests").replace(".", "_")
class testcase(cls, SynchronousTestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
if hasattr(cls, "__qualname__"):
testcase.__qualname__ = ".".join(cls.__qualname__.split(".")[0:-1] + [name])
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
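ReactorBuilder is normally consumed by defining tests on a builder subclass and then expanding it into one test case class per known reactor; a hypothetical sketch of that pattern, with the import path assumed to mirror upstream Twisted:
from twisted.internet.test.reactormixins import ReactorBuilder  # assumed path

class DelayedCallTestsBuilder(ReactorBuilder):
    """
    Hypothetical builder: the test below runs once per installed reactor type.
    """
    def test_callLaterRuns(self):
        reactor = self.buildReactor()
        fired = []
        reactor.callLater(0, fired.append, True)
        reactor.callLater(0.1, reactor.stop)
        self.runReactor(reactor)
        self.assertEqual(fired, [True])

# Expands into SelectReactorTests, PollReactorTests, ... in this module's namespace.
globals().update(DelayedCallTestsBuilder.makeTestCaseClasses())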

View file

@@ -0,0 +1,69 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.abstract}, a collection of APIs for implementing
reactors.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.abstract import isIPv6Address
class IPv6AddressTests(SynchronousTestCase):
"""
Tests for L{isIPv6Address}, a function for determining if a particular
string is an IPv6 address literal.
"""
def test_empty(self):
"""
The empty string is not an IPv6 address literal.
"""
self.assertFalse(isIPv6Address(""))
def test_colon(self):
"""
A single C{":"} is not an IPv6 address literal.
"""
self.assertFalse(isIPv6Address(":"))
def test_loopback(self):
"""
C{"::1"} is the IPv6 loopback address literal.
"""
self.assertTrue(isIPv6Address("::1"))
def test_scopeID(self):
"""
An otherwise valid IPv6 address literal may also include a C{"%"}
followed by an arbitrary scope identifier.
"""
self.assertTrue(isIPv6Address("fe80::1%eth0"))
self.assertTrue(isIPv6Address("fe80::2%1"))
self.assertTrue(isIPv6Address("fe80::3%en2"))
def test_invalidWithScopeID(self):
"""
An otherwise invalid IPv6 address literal is still invalid with a
trailing scope identifier.
"""
self.assertFalse(isIPv6Address("%eth0"))
self.assertFalse(isIPv6Address(":%eth0"))
self.assertFalse(isIPv6Address("hello%eth0"))
def test_unicodeAndBytes(self):
"""
L{isIPv6Address} evaluates ASCII-encoded bytes as well as text.
"""
self.assertTrue(isIPv6Address(b"fe80::2%1"))
self.assertTrue(isIPv6Address(u"fe80::2%1"))
self.assertFalse(isIPv6Address(u"\u4321"))
self.assertFalse(isIPv6Address(u"hello%eth0"))
self.assertFalse(isIPv6Address(b"hello%eth0"))
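The helper under test is easy to poke at directly; a quick sketch mirroring the cases above:
from twisted.internet.abstract import isIPv6Address

print(isIPv6Address("::1"))            # True  - loopback literal
print(isIPv6Address("fe80::1%eth0"))   # True  - scope identifier is allowed
print(isIPv6Address("127.0.0.1"))      # False - IPv4, not IPv6
print(isIPv6Address(""))               # False - empty string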

View file

@ -0,0 +1,276 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import os
import socket
from twisted.trial import unittest
from twisted.internet.address import IPv4Address, UNIXAddress, IPv6Address
from twisted.internet.address import HostnameAddress
from twisted.python.compat import nativeString
from twisted.python.runtime import platform
if not platform._supportsSymlinks():
symlinkSkip = "Platform does not support symlinks"
else:
symlinkSkip = None
try:
socket.AF_UNIX
except AttributeError:
unixSkip = "Platform doesn't support UNIX sockets."
else:
unixSkip = None
class AddressTestCaseMixin(object):
def test_addressComparison(self):
"""
        Two different address instances that share the same properties are
        considered equal by C{==} and are not considered unequal by C{!=}.
Note: When applied via UNIXAddress class, this uses the same
filename for both objects being compared.
"""
self.assertTrue(self.buildAddress() == self.buildAddress())
self.assertFalse(self.buildAddress() != self.buildAddress())
def test_hash(self):
"""
C{__hash__} can be used to get a hash of an address, allowing
addresses to be used as keys in dictionaries, for instance.
"""
addr = self.buildAddress()
d = {addr: True}
self.assertTrue(d[self.buildAddress()])
def test_differentNamesComparison(self):
"""
        Check that comparison operators work correctly on address objects
        when a different name is passed in.
"""
self.assertFalse(self.buildAddress() == self.buildDifferentAddress())
self.assertFalse(self.buildDifferentAddress() == self.buildAddress())
self.assertTrue(self.buildAddress() != self.buildDifferentAddress())
self.assertTrue(self.buildDifferentAddress() != self.buildAddress())
def assertDeprecations(self, testMethod, message):
"""
        Assert that a DeprecationWarning with the given message was
emitted against the given method.
"""
warnings = self.flushWarnings([testMethod])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'], message)
self.assertEqual(len(warnings), 1)
class IPv4AddressTestCaseMixin(AddressTestCaseMixin):
addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))
class HostnameAddressTests(unittest.TestCase, AddressTestCaseMixin):
"""
Test case for L{HostnameAddress}.
"""
addressArgSpec = (("hostname", "%s"), ("port", "%d"))
def buildAddress(self):
"""
Create an arbitrary new L{HostnameAddress} instance.
@return: A L{HostnameAddress} instance.
"""
return HostnameAddress(b"example.com", 0)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a different hostname.
@return: A L{HostnameAddress} instance.
"""
return HostnameAddress(b"example.net", 0)
class IPv4AddressTCPTests(unittest.SynchronousTestCase,
IPv4AddressTestCaseMixin):
def buildAddress(self):
"""
Create an arbitrary new L{IPv4Address} instance with a C{"TCP"}
type. A new instance is created for each call, but always for the
same address.
"""
return IPv4Address("TCP", "127.0.0.1", 0)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a different fixed address.
"""
return IPv4Address("TCP", "127.0.0.2", 0)
class IPv4AddressUDPTests(unittest.SynchronousTestCase,
IPv4AddressTestCaseMixin):
def buildAddress(self):
"""
Create an arbitrary new L{IPv4Address} instance with a C{"UDP"}
type. A new instance is created for each call, but always for the
same address.
"""
return IPv4Address("UDP", "127.0.0.1", 0)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a different fixed address.
"""
return IPv4Address("UDP", "127.0.0.2", 0)
class IPv6AddressTests(unittest.SynchronousTestCase, AddressTestCaseMixin):
addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))
def buildAddress(self):
"""
Create an arbitrary new L{IPv6Address} instance with a C{"TCP"}
type. A new instance is created for each call, but always for the
same address.
"""
return IPv6Address("TCP", "::1", 0)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a different fixed address.
"""
return IPv6Address("TCP", "::2", 0)
class UNIXAddressTests(unittest.SynchronousTestCase):
skip = unixSkip
addressArgSpec = (("name", "%r"),)
def setUp(self):
self._socketAddress = self.mktemp()
self._otherAddress = self.mktemp()
def buildAddress(self):
"""
Create an arbitrary new L{UNIXAddress} instance. A new instance is
created for each call, but always for the same address.
"""
return UNIXAddress(self._socketAddress)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a different fixed address.
"""
return UNIXAddress(self._otherAddress)
def test_repr(self):
"""
        The repr of L{UNIXAddress} includes the filename that the
        L{UNIXAddress} is for.
"""
self.assertEqual(repr(self.buildAddress()), "UNIXAddress('%s')" % (
nativeString(self._socketAddress)))
def test_comparisonOfLinkedFiles(self):
"""
UNIXAddress objects compare as equal if they link to the same file.
"""
linkName = self.mktemp()
with open(self._socketAddress, 'w') as self.fd:
os.symlink(os.path.abspath(self._socketAddress), linkName)
self.assertEqual(UNIXAddress(self._socketAddress),
UNIXAddress(linkName))
self.assertEqual(UNIXAddress(linkName),
UNIXAddress(self._socketAddress))
if not unixSkip:
test_comparisonOfLinkedFiles.skip = symlinkSkip
def test_hashOfLinkedFiles(self):
"""
UNIXAddress Objects that compare as equal have the same hash value.
"""
linkName = self.mktemp()
with open(self._socketAddress, 'w') as self.fd:
os.symlink(os.path.abspath(self._socketAddress), linkName)
self.assertEqual(hash(UNIXAddress(self._socketAddress)),
hash(UNIXAddress(linkName)))
if not unixSkip:
test_hashOfLinkedFiles.skip = symlinkSkip
class EmptyUNIXAddressTests(unittest.SynchronousTestCase,
AddressTestCaseMixin):
"""
Tests for L{UNIXAddress} operations involving a L{None} address.
"""
skip = unixSkip
addressArgSpec = (("name", "%r"),)
def setUp(self):
self._socketAddress = self.mktemp()
def buildAddress(self):
"""
Create an arbitrary new L{UNIXAddress} instance. A new instance is
created for each call, but always for the same address. This builds it
with a fixed address of L{None}.
"""
return UNIXAddress(None)
def buildDifferentAddress(self):
"""
Like L{buildAddress}, but with a random temporary directory.
"""
return UNIXAddress(self._socketAddress)
def test_comparisonOfLinkedFiles(self):
"""
A UNIXAddress referring to a L{None} address does not compare equal to a
UNIXAddress referring to a symlink.
"""
linkName = self.mktemp()
with open(self._socketAddress, 'w') as self.fd:
os.symlink(os.path.abspath(self._socketAddress), linkName)
self.assertNotEqual(UNIXAddress(self._socketAddress),
UNIXAddress(None))
self.assertNotEqual(UNIXAddress(None),
UNIXAddress(self._socketAddress))
if not unixSkip:
test_comparisonOfLinkedFiles.skip = symlinkSkip
def test_emptyHash(self):
"""
C{__hash__} can be used to get a hash of an address, even one referring
to L{None} rather than a real path.
"""
addr = self.buildAddress()
d = {addr: True}
self.assertTrue(d[self.buildAddress()])

View file

@ -0,0 +1,48 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.asyncioreactor}.
"""
from twisted.trial.unittest import SynchronousTestCase
from .reactormixins import ReactorBuilder
try:
from twisted.internet.asyncioreactor import AsyncioSelectorReactor
import asyncio
except ImportError:
AsyncioSelectorReactor = None
skipReason = "Requires asyncio."
class AsyncioSelectorReactorTests(ReactorBuilder, SynchronousTestCase):
"""
L{AsyncioSelectorReactor} tests.
"""
if AsyncioSelectorReactor is None:
skip = skipReason
def test_defaultEventLoopFromGlobalPolicy(self):
"""
L{AsyncioSelectorReactor} wraps the global policy's event loop
by default. This ensures that L{asyncio.Future}s and
coroutines created by library code that uses
L{asyncio.get_event_loop} are bound to the same loop.
"""
reactor = AsyncioSelectorReactor()
future = asyncio.Future()
result = []
def completed(future):
result.append(future.result())
reactor.stop()
future.add_done_callback(completed)
future.set_result(True)
self.assertEqual(result, [])
self.runReactor(reactor, timeout=1)
self.assertEqual(result, [True])
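# An illustrative sketch, not part of the patch: because the reactor wraps the
# global policy's event loop, asyncio futures created elsewhere can be bridged
# to Deferreds and fire while this reactor runs. Deferred.fromFuture is assumed
# to be available, as in contemporary Twisted releases.
#
#     import asyncio
#     from twisted.internet.asyncioreactor import AsyncioSelectorReactor
#     from twisted.internet.defer import Deferred
#
#     reactor = AsyncioSelectorReactor()
#     future = asyncio.Future()
#     d = Deferred.fromFuture(future)
#     d.addCallback(lambda result: reactor.stop())
#     reactor.callWhenRunning(future.set_result, "done")
#     reactor.run()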

View file

@ -0,0 +1,450 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.base}.
"""
import socket
try:
from Queue import Queue
except ImportError:
from queue import Queue
from zope.interface import implementer
from twisted.python.threadpool import ThreadPool
from twisted.internet.interfaces import (IReactorTime, IReactorThreads,
IResolverSimple)
from twisted.internet.error import DNSLookupError
from twisted.internet._resolver import FirstOneWins
from twisted.internet.defer import Deferred
from twisted.internet.base import ThreadedResolver, DelayedCall, ReactorBase
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase, SkipTest
@implementer(IReactorTime, IReactorThreads)
class FakeReactor(object):
"""
A fake reactor implementation which just supports enough reactor APIs for
L{ThreadedResolver}.
"""
def __init__(self):
self._clock = Clock()
self.callLater = self._clock.callLater
self._threadpool = ThreadPool()
self._threadpool.start()
self.getThreadPool = lambda: self._threadpool
self._threadCalls = Queue()
def callFromThread(self, f, *args, **kwargs):
self._threadCalls.put((f, args, kwargs))
def _runThreadCalls(self):
f, args, kwargs = self._threadCalls.get()
f(*args, **kwargs)
def _stop(self):
self._threadpool.stop()
class ThreadedResolverTests(TestCase):
"""
Tests for L{ThreadedResolver}.
"""
def test_success(self):
"""
L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires
with the value returned by the call to L{socket.gethostbyname} in the
threadpool of the reactor passed to L{ThreadedResolver.__init__}.
"""
ip = "10.0.0.17"
name = "foo.bar.example.com"
timeout = 30
reactor = FakeReactor()
self.addCleanup(reactor._stop)
lookedUp = []
resolvedTo = []
def fakeGetHostByName(name):
lookedUp.append(name)
return ip
self.patch(socket, 'gethostbyname', fakeGetHostByName)
resolver = ThreadedResolver(reactor)
d = resolver.getHostByName(name, (timeout,))
d.addCallback(resolvedTo.append)
reactor._runThreadCalls()
self.assertEqual(lookedUp, [name])
self.assertEqual(resolvedTo, [ip])
# Make sure that any timeout-related stuff gets cleaned up.
reactor._clock.advance(timeout + 1)
self.assertEqual(reactor._clock.calls, [])
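        # Outside the test, the same API is driven by a real reactor; a rough
        # sketch (the hostname and timeout below are arbitrary, and the lookup
        # runs in the reactor's thread pool):
        #
        #     from twisted.internet import reactor
        #     from twisted.internet.base import ThreadedResolver
        #
        #     resolver = ThreadedResolver(reactor)
        #     d = resolver.getHostByName("example.com", (30,))
        #     results = []
        #     d.addCallback(results.append)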
def test_failure(self):
"""
L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires a
L{Failure} if the call to L{socket.gethostbyname} raises an exception.
"""
timeout = 30
reactor = FakeReactor()
self.addCleanup(reactor._stop)
def fakeGetHostByName(name):
raise IOError("ENOBUFS (this is a funny joke)")
self.patch(socket, 'gethostbyname', fakeGetHostByName)
failedWith = []
resolver = ThreadedResolver(reactor)
d = resolver.getHostByName("some.name", (timeout,))
self.assertFailure(d, DNSLookupError)
d.addCallback(failedWith.append)
reactor._runThreadCalls()
self.assertEqual(len(failedWith), 1)
# Make sure that any timeout-related stuff gets cleaned up.
reactor._clock.advance(timeout + 1)
self.assertEqual(reactor._clock.calls, [])
def test_timeout(self):
"""
If L{socket.gethostbyname} does not complete before the specified
timeout elapsed, the L{Deferred} returned by
L{ThreadedResolver.getHostByName} fails with L{DNSLookupError}.
"""
timeout = 10
reactor = FakeReactor()
self.addCleanup(reactor._stop)
result = Queue()
def fakeGetHostByName(name):
raise result.get()
self.patch(socket, 'gethostbyname', fakeGetHostByName)
failedWith = []
resolver = ThreadedResolver(reactor)
d = resolver.getHostByName("some.name", (timeout,))
self.assertFailure(d, DNSLookupError)
d.addCallback(failedWith.append)
reactor._clock.advance(timeout - 1)
self.assertEqual(failedWith, [])
reactor._clock.advance(1)
self.assertEqual(len(failedWith), 1)
# Eventually the socket.gethostbyname does finish - in this case, with
# an exception. Nobody cares, though.
result.put(IOError("The I/O was errorful"))
def test_resolverGivenStr(self):
"""
L{ThreadedResolver.getHostByName} is passed L{str}, encoded using IDNA
if required.
"""
calls = []
@implementer(IResolverSimple)
class FakeResolver(object):
def getHostByName(self, name, timeouts=()):
calls.append(name)
return Deferred()
class JustEnoughReactor(ReactorBase):
def installWaker(self):
pass
fake = FakeResolver()
reactor = JustEnoughReactor()
reactor.installResolver(fake)
rec = FirstOneWins(Deferred())
reactor.nameResolver.resolveHostName(
rec, u"example.example")
reactor.nameResolver.resolveHostName(
rec, "example.example")
reactor.nameResolver.resolveHostName(
rec, u"v\xe4\xe4ntynyt.example")
reactor.nameResolver.resolveHostName(
rec, u"\u0440\u0444.example")
reactor.nameResolver.resolveHostName(
rec, "xn----7sbb4ac0ad0be6cf.xn--p1ai")
self.assertEqual(len(calls), 5)
self.assertEqual(list(map(type, calls)), [str]*5)
self.assertEqual("example.example", calls[0])
self.assertEqual("example.example", calls[1])
self.assertEqual("xn--vntynyt-5waa.example", calls[2])
self.assertEqual("xn--p1ai.example", calls[3])
self.assertEqual("xn----7sbb4ac0ad0be6cf.xn--p1ai", calls[4])
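        # A worked sketch of the IDNA encoding the assertions above depend on,
        # using the standard library's "idna" codec; Twisted may use a separate
        # idna package, so treat this as an approximation:
        #
        #     u"example.example".encode("idna")          # b'example.example'
        #     u"v\xe4\xe4ntynyt.example".encode("idna")  # b'xn--vntynyt-5waa.example'
        #     u"\u0440\u0444.example".encode("idna")     # b'xn--p1ai.example'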
def nothing():
"""
Function used by L{DelayedCallTests.test_str}.
"""
class DelayedCallMixin(object):
"""
L{DelayedCall}
"""
def _getDelayedCallAt(self, time):
"""
Get a L{DelayedCall} instance at a given C{time}.
@param time: The absolute time at which the returned L{DelayedCall}
will be scheduled.
"""
def noop(call):
pass
return DelayedCall(time, lambda: None, (), {}, noop, noop, None)
def setUp(self):
"""
        Create two L{DelayedCall} instances scheduled to run at different
times.
"""
self.zero = self._getDelayedCallAt(0)
self.one = self._getDelayedCallAt(1)
def test_str(self):
"""
The string representation of a L{DelayedCall} instance, as returned by
L{str}, includes the unsigned id of the instance, as well as its state,
the function to be called, and the function arguments.
"""
dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None, lambda: 1.5)
self.assertEqual(
str(dc),
"<DelayedCall 0x%x [10.5s] called=0 cancelled=0 nothing(3, A=5)>"
% (id(dc),),
)
def test_repr(self):
"""
The string representation of a L{DelayedCall} instance, as returned by
        L{repr}, is identical to that returned by L{str}.
"""
dc = DelayedCall(13, nothing, (6, ), {"A": 9}, None, None, lambda: 1.6)
self.assertEqual(str(dc), repr(dc))
def test_lt(self):
"""
For two instances of L{DelayedCall} C{a} and C{b}, C{a < b} is true
if and only if C{a} is scheduled to run before C{b}.
"""
zero, one = self.zero, self.one
self.assertTrue(zero < one)
self.assertFalse(one < zero)
self.assertFalse(zero < zero)
self.assertFalse(one < one)
def test_le(self):
"""
For two instances of L{DelayedCall} C{a} and C{b}, C{a <= b} is true
if and only if C{a} is scheduled to run before C{b} or at the same
time as C{b}.
"""
zero, one = self.zero, self.one
self.assertTrue(zero <= one)
self.assertFalse(one <= zero)
self.assertTrue(zero <= zero)
self.assertTrue(one <= one)
def test_gt(self):
"""
For two instances of L{DelayedCall} C{a} and C{b}, C{a > b} is true
if and only if C{a} is scheduled to run after C{b}.
"""
zero, one = self.zero, self.one
self.assertTrue(one > zero)
self.assertFalse(zero > one)
self.assertFalse(zero > zero)
self.assertFalse(one > one)
def test_ge(self):
"""
        For two instances of L{DelayedCall} C{a} and C{b}, C{a >= b} is true
if and only if C{a} is scheduled to run after C{b} or at the same
time as C{b}.
"""
zero, one = self.zero, self.one
self.assertTrue(one >= zero)
self.assertFalse(zero >= one)
self.assertTrue(zero >= zero)
self.assertTrue(one >= one)
def test_eq(self):
"""
A L{DelayedCall} instance is only equal to itself.
"""
# Explicitly use == here, instead of assertEqual, to be more
# confident __eq__ is being tested.
self.assertFalse(self.zero == self.one)
self.assertTrue(self.zero == self.zero)
self.assertTrue(self.one == self.one)
def test_ne(self):
"""
A L{DelayedCall} instance is not equal to any other object.
"""
# Explicitly use != here, instead of assertEqual, to be more
# confident __ne__ is being tested.
self.assertTrue(self.zero != self.one)
self.assertFalse(self.zero != self.zero)
self.assertFalse(self.one != self.one)
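    # A small sketch, not part of the patch, of the ordering contract exercised
    # above: DelayedCall instances order by their scheduled time, so a list of
    # them can be sorted directly.
    #
    #     def noop(call):
    #         pass
    #
    #     calls = [DelayedCall(t, lambda: None, (), {}, noop, noop, None)
    #              for t in (3, 1, 2)]
    #     [c.time for c in sorted(calls)]    # [1, 2, 3]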
class DelayedCallNoDebugTests(DelayedCallMixin, TestCase):
"""
L{DelayedCall}
"""
def setUp(self):
"""
Turn debug off.
"""
self.patch(DelayedCall, 'debug', False)
DelayedCallMixin.setUp(self)
def test_str(self):
"""
The string representation of a L{DelayedCall} instance, as returned by
L{str}, includes the unsigned id of the instance, as well as its state,
the function to be called, and the function arguments.
"""
dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None, lambda: 1.5)
expected = (
"<DelayedCall 0x{:x} [10.5s] called=0 cancelled=0 "
"nothing(3, A=5)>".format(id(dc)))
self.assertEqual(str(dc), expected)
class DelayedCallDebugTests(DelayedCallMixin, TestCase):
"""
L{DelayedCall}
"""
def setUp(self):
"""
Turn debug on.
"""
self.patch(DelayedCall, 'debug', True)
DelayedCallMixin.setUp(self)
def test_str(self):
"""
The string representation of a L{DelayedCall} instance, as returned by
L{str}, includes the unsigned id of the instance, as well as its state,
the function to be called, and the function arguments.
"""
dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None, lambda: 1.5)
expectedRegexp = (
"<DelayedCall 0x{:x} \\[10.5s\\] called=0 cancelled=0 "
"nothing\\(3, A=5\\)\n\n"
"traceback at creation:".format(id(dc)))
self.assertRegex(
str(dc), expectedRegexp)
class TestSpySignalCapturingReactor(ReactorBase):
"""
Subclass of ReactorBase to capture signals delivered to the
reactor for inspection.
"""
def installWaker(self):
"""
Required method, unused.
"""
class ReactorBaseSignalTests(TestCase):
"""
Tests to exercise ReactorBase's signal exit reporting path.
"""
def test_exitSignalDefaultsToNone(self):
"""
The default value of the _exitSignal attribute is None.
"""
reactor = TestSpySignalCapturingReactor()
self.assertIs(None, reactor._exitSignal)
def test_captureSIGINT(self):
"""
ReactorBase's SIGINT handler saves the value of SIGINT to the
_exitSignal attribute.
"""
reactor = TestSpySignalCapturingReactor()
reactor.sigInt(signal.SIGINT, None)
self.assertEquals(signal.SIGINT, reactor._exitSignal)
def test_captureSIGTERM(self):
"""
ReactorBase's SIGTERM handler saves the value of SIGTERM to the
_exitSignal attribute.
"""
reactor = TestSpySignalCapturingReactor()
reactor.sigTerm(signal.SIGTERM, None)
self.assertEquals(signal.SIGTERM, reactor._exitSignal)
def test_captureSIGBREAK(self):
"""
ReactorBase's SIGBREAK handler saves the value of SIGBREAK to the
_exitSignal attribute.
"""
if not hasattr(signal, "SIGBREAK"):
raise SkipTest("signal module does not have SIGBREAK")
reactor = TestSpySignalCapturingReactor()
reactor.sigBreak(signal.SIGBREAK, None)
self.assertEquals(signal.SIGBREAK, reactor._exitSignal)
try:
import signal
except ImportError:
ReactorBaseSignalTests.skip = "signal module not available"

View file

@ -0,0 +1,73 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._baseprocess} which implements process-related
functionality that is useful in all platforms supporting L{IReactorProcess}.
"""
__metaclass__ = type
from twisted.python.deprecate import getWarningMethod, setWarningMethod
from twisted.trial.unittest import TestCase
from twisted.internet._baseprocess import BaseProcess
class BaseProcessTests(TestCase):
"""
    Tests for L{BaseProcess}, a parent class for classes which represent
    processes and which implements functionality common to many different
    process implementations.
"""
def test_callProcessExited(self):
"""
L{BaseProcess._callProcessExited} calls the C{processExited} method of
its C{proto} attribute and passes it a L{Failure} wrapping the given
exception.
"""
class FakeProto:
reason = None
def processExited(self, reason):
self.reason = reason
reason = RuntimeError("fake reason")
process = BaseProcess(FakeProto())
process._callProcessExited(reason)
process.proto.reason.trap(RuntimeError)
self.assertIs(reason, process.proto.reason.value)
def test_callProcessExitedMissing(self):
"""
L{BaseProcess._callProcessExited} emits a L{DeprecationWarning} if the
object referred to by its C{proto} attribute has no C{processExited}
method.
"""
class FakeProto:
pass
reason = object()
process = BaseProcess(FakeProto())
self.addCleanup(setWarningMethod, getWarningMethod())
warnings = []
def collect(message, category, stacklevel):
warnings.append((message, category, stacklevel))
setWarningMethod(collect)
process._callProcessExited(reason)
[(message, category, stacklevel)] = warnings
self.assertEqual(
message,
"Since Twisted 8.2, IProcessProtocol.processExited is required. "
"%s.%s must implement it." % (
FakeProto.__module__, FakeProto.__name__))
self.assertIs(category, DeprecationWarning)
# The stacklevel doesn't really make sense for this kind of
# deprecation. Requiring it to be 0 will at least avoid pointing to
# any part of Twisted or a random part of the application's code, which
# I think would be more misleading than having it point inside the
# warning system itself. -exarkun
self.assertEqual(stacklevel, 0)

View file

@ -0,0 +1,333 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorCore}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import signal
import time
import inspect
from twisted.internet.abstract import FileDescriptor
from twisted.internet.error import ReactorAlreadyRunning, ReactorNotRestartable
from twisted.internet.defer import Deferred
from twisted.internet.test.reactormixins import ReactorBuilder
class ObjectModelIntegrationMixin(object):
"""
Helpers for tests about the object model of reactor-related objects.
"""
def assertFullyNewStyle(self, instance):
"""
Assert that the given object is an instance of a new-style class and
that there are no classic classes in the inheritance hierarchy of
that class.
This is a beneficial condition because PyPy is better able to
optimize attribute lookup on such classes.
"""
self.assertIsInstance(instance, object)
mro = inspect.getmro(type(instance))
for subclass in mro:
self.assertTrue(
issubclass(subclass, object),
"%r is not new-style" % (subclass,))
class ObjectModelIntegrationTests(ReactorBuilder, ObjectModelIntegrationMixin):
"""
Test details of object model integration against all reactors.
"""
def test_newstyleReactor(self):
"""
Checks that all reactors on a platform have method resolution order
containing only new style classes.
"""
reactor = self.buildReactor()
self.assertFullyNewStyle(reactor)
class SystemEventTestsBuilder(ReactorBuilder):
"""
Builder defining tests relating to L{IReactorCore.addSystemEventTrigger}
and L{IReactorCore.fireSystemEvent}.
"""
def test_stopWhenNotStarted(self):
"""
C{reactor.stop()} raises L{RuntimeError} when called when the reactor
has not been started.
"""
reactor = self.buildReactor()
self.assertRaises(RuntimeError, reactor.stop)
def test_stopWhenAlreadyStopped(self):
"""
C{reactor.stop()} raises L{RuntimeError} when called after the reactor
has been stopped.
"""
reactor = self.buildReactor()
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertRaises(RuntimeError, reactor.stop)
def test_callWhenRunningOrder(self):
"""
Functions are run in the order that they were passed to
L{reactor.callWhenRunning}.
"""
reactor = self.buildReactor()
events = []
reactor.callWhenRunning(events.append, "first")
reactor.callWhenRunning(events.append, "second")
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertEqual(events, ["first", "second"])
def test_runningForStartupEvents(self):
"""
The reactor is not running when C{"before"} C{"startup"} triggers are
called and is running when C{"during"} and C{"after"} C{"startup"}
triggers are called.
"""
reactor = self.buildReactor()
state = {}
def beforeStartup():
state['before'] = reactor.running
def duringStartup():
state['during'] = reactor.running
def afterStartup():
state['after'] = reactor.running
reactor.addSystemEventTrigger("before", "startup", beforeStartup)
reactor.addSystemEventTrigger("during", "startup", duringStartup)
reactor.addSystemEventTrigger("after", "startup", afterStartup)
reactor.callWhenRunning(reactor.stop)
self.assertEqual(state, {})
self.runReactor(reactor)
self.assertEqual(
state,
{"before": False,
"during": True,
"after": True})
def test_signalHandlersInstalledDuringStartup(self):
"""
        Signal handlers are installed in response to the C{"during"}
        C{"startup"} event.
"""
reactor = self.buildReactor()
phase = [None]
def beforeStartup():
phase[0] = "before"
def afterStartup():
phase[0] = "after"
reactor.addSystemEventTrigger("before", "startup", beforeStartup)
reactor.addSystemEventTrigger("after", "startup", afterStartup)
sawPhase = []
def fakeSignal(signum, action):
sawPhase.append(phase[0])
self.patch(signal, 'signal', fakeSignal)
reactor.callWhenRunning(reactor.stop)
self.assertIsNone(phase[0])
self.assertEqual(sawPhase, [])
self.runReactor(reactor)
self.assertIn("before", sawPhase)
self.assertEqual(phase[0], "after")
def test_stopShutDownEvents(self):
"""
C{reactor.stop()} fires all three phases of shutdown event triggers
before it makes C{reactor.run()} return.
"""
reactor = self.buildReactor()
events = []
reactor.addSystemEventTrigger(
"before", "shutdown",
lambda: events.append(("before", "shutdown")))
reactor.addSystemEventTrigger(
"during", "shutdown",
lambda: events.append(("during", "shutdown")))
reactor.addSystemEventTrigger(
"after", "shutdown",
lambda: events.append(("after", "shutdown")))
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertEqual(events, [("before", "shutdown"),
("during", "shutdown"),
("after", "shutdown")])
def test_shutdownFiresTriggersAsynchronously(self):
"""
C{"before"} C{"shutdown"} triggers are not run synchronously from
L{reactor.stop}.
"""
reactor = self.buildReactor()
events = []
reactor.addSystemEventTrigger(
"before", "shutdown", events.append, "before shutdown")
def stopIt():
reactor.stop()
events.append("stopped")
reactor.callWhenRunning(stopIt)
self.assertEqual(events, [])
self.runReactor(reactor)
self.assertEqual(events, ["stopped", "before shutdown"])
def test_shutdownDisconnectsCleanly(self):
"""
A L{IFileDescriptor.connectionLost} implementation which raises an
exception does not prevent the remaining L{IFileDescriptor}s from
having their C{connectionLost} method called.
"""
lostOK = [False]
# Subclass FileDescriptor to get logPrefix
class ProblematicFileDescriptor(FileDescriptor):
def connectionLost(self, reason):
raise RuntimeError("simulated connectionLost error")
class OKFileDescriptor(FileDescriptor):
def connectionLost(self, reason):
lostOK[0] = True
reactor = self.buildReactor()
# Unfortunately, it is necessary to patch removeAll to directly control
# the order of the returned values. The test is only valid if
# ProblematicFileDescriptor comes first. Also, return these
# descriptors only the first time removeAll is called so that if it is
# called again the file descriptors aren't re-disconnected.
fds = iter([ProblematicFileDescriptor(), OKFileDescriptor()])
reactor.removeAll = lambda: fds
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
self.assertTrue(lostOK[0])
def test_multipleRun(self):
"""
C{reactor.run()} raises L{ReactorAlreadyRunning} when called when
the reactor is already running.
"""
events = []
def reentrantRun():
self.assertRaises(ReactorAlreadyRunning, reactor.run)
events.append("tested")
reactor = self.buildReactor()
reactor.callWhenRunning(reentrantRun)
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertEqual(events, ["tested"])
def test_runWithAsynchronousBeforeStartupTrigger(self):
"""
When there is a C{'before'} C{'startup'} trigger which returns an
unfired L{Deferred}, C{reactor.run()} starts the reactor and does not
return until after C{reactor.stop()} is called
"""
events = []
def trigger():
events.append('trigger')
d = Deferred()
d.addCallback(callback)
reactor.callLater(0, d.callback, None)
return d
def callback(ignored):
events.append('callback')
reactor.stop()
reactor = self.buildReactor()
reactor.addSystemEventTrigger('before', 'startup', trigger)
self.runReactor(reactor)
self.assertEqual(events, ['trigger', 'callback'])
def test_iterate(self):
"""
C{reactor.iterate()} does not block.
"""
reactor = self.buildReactor()
t = reactor.callLater(5, reactor.crash)
start = time.time()
reactor.iterate(0) # Shouldn't block
elapsed = time.time() - start
self.assertTrue(elapsed < 2)
t.cancel()
def test_crash(self):
"""
C{reactor.crash()} stops the reactor and does not fire shutdown
triggers.
"""
reactor = self.buildReactor()
events = []
reactor.addSystemEventTrigger(
"before", "shutdown",
lambda: events.append(("before", "shutdown")))
reactor.callWhenRunning(reactor.callLater, 0, reactor.crash)
self.runReactor(reactor)
self.assertFalse(reactor.running)
self.assertFalse(
events,
"Shutdown triggers invoked but they should not have been.")
def test_runAfterCrash(self):
"""
C{reactor.run()} restarts the reactor after it has been stopped by
C{reactor.crash()}.
"""
events = []
def crash():
events.append('crash')
reactor.crash()
reactor = self.buildReactor()
reactor.callWhenRunning(crash)
self.runReactor(reactor)
def stop():
events.append(('stop', reactor.running))
reactor.stop()
reactor.callWhenRunning(stop)
self.runReactor(reactor)
self.assertEqual(events, ['crash', ('stop', True)])
def test_runAfterStop(self):
"""
        C{reactor.run()} raises L{ReactorNotRestartable} when called after the
        reactor has previously been run and stopped.
"""
events = []
def restart():
self.assertRaises(ReactorNotRestartable, reactor.run)
events.append('tested')
reactor = self.buildReactor()
reactor.callWhenRunning(reactor.stop)
reactor.addSystemEventTrigger('after', 'shutdown', restart)
self.runReactor(reactor)
self.assertEqual(events, ['tested'])
globals().update(SystemEventTestsBuilder.makeTestCaseClasses())
globals().update(ObjectModelIntegrationTests.makeTestCaseClasses())

View file

@ -0,0 +1,58 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A wrapper for L{twisted.internet.test._awaittests}, as that test module
includes keywords not valid in Python versions before 3.5.
"""
from __future__ import absolute_import, division
from twisted.python.compat import _PY35PLUS, _PY3, execfile
from twisted.python.filepath import FilePath
from twisted.trial import unittest
if _PY35PLUS:
_path = FilePath(__file__).parent().child("_awaittests.py.3only")
_g = {"__name__": __name__ + ".3-only.awaittests"}
execfile(_path.path, _g)
AwaitTests = _g["AwaitTests"]
else:
class AwaitTests(unittest.SynchronousTestCase):
"""
A dummy class to show that this test file was discovered but the tests
are unable to be run in this version of Python.
"""
skip = "async/await is not available before Python 3.5"
def test_notAvailable(self):
"""
A skipped test to show that this was not run because the Python is
too old.
"""
if _PY3:
_path = FilePath(__file__).parent().child("_yieldfromtests.py.3only")
_g = {"__name__": __name__ + ".3-only.yieldfromtests"}
execfile(_path.path, _g)
YieldFromTests = _g["YieldFromTests"]
else:
class YieldFromTests(unittest.SynchronousTestCase):
"""
A dummy class to show that this test file was discovered but the tests
are unable to be run in this version of Python.
"""
skip = "yield from is not available before Python 3"
def test_notAvailable(self):
"""
A skipped test to show that this was not run because the Python is
too old.
"""
__all__ = ["AwaitTests", "YieldFromTests"]

View file

@ -0,0 +1,119 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.default}.
"""
from __future__ import division, absolute_import
import select, sys
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.runtime import Platform
from twisted.python.reflect import requireModule
from twisted.internet import default
from twisted.internet.default import _getInstallFunction, install
from twisted.internet.test.test_main import NoReactor
from twisted.internet.interfaces import IReactorCore
unix = Platform('posix', 'other')
linux = Platform('posix', 'linux2')
windows = Platform('nt', 'win32')
osx = Platform('posix', 'darwin')
class PollReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the poll(2) or epoll(7)-based reactors.
"""
def assertIsPoll(self, install):
"""
Assert the given function will install the poll() reactor, or select()
if poll() is unavailable.
"""
if hasattr(select, "poll"):
self.assertEqual(
install.__module__, 'twisted.internet.pollreactor')
else:
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_unix(self):
"""
L{_getInstallFunction} chooses the poll reactor on arbitrary Unix
platforms, falling back to select(2) if it is unavailable.
"""
install = _getInstallFunction(unix)
self.assertIsPoll(install)
def test_linux(self):
"""
L{_getInstallFunction} chooses the epoll reactor on Linux, or poll if
epoll is unavailable.
"""
install = _getInstallFunction(linux)
if requireModule('twisted.internet.epollreactor') is None:
self.assertIsPoll(install)
else:
self.assertEqual(
install.__module__, 'twisted.internet.epollreactor')
class SelectReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the select(2)-based reactor.
"""
def test_osx(self):
"""
L{_getInstallFunction} chooses the select reactor on macOS.
"""
install = _getInstallFunction(osx)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_windows(self):
"""
L{_getInstallFunction} chooses the select reactor on Windows.
"""
install = _getInstallFunction(windows)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
class InstallationTests(SynchronousTestCase):
"""
Tests for actual installation of the reactor.
"""
def test_install(self):
"""
L{install} installs a reactor.
"""
with NoReactor():
install()
self.assertIn("twisted.internet.reactor", sys.modules)
def test_reactor(self):
"""
Importing L{twisted.internet.reactor} installs the default reactor if
none is installed.
"""
installed = []
def installer():
installed.append(True)
return install()
self.patch(default, "install", installer)
with NoReactor():
from twisted.internet import reactor
self.assertTrue(IReactorCore.providedBy(reactor))
self.assertEqual(installed, [True])
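# An illustrative sketch, not from the patch: application code that wants the
# platform default reactor installs it once, before anything else imports
# twisted.internet.reactor.
#
#     from twisted.internet import default
#     default.install()
#     from twisted.internet import reactor
#     reactor.callWhenRunning(reactor.stop)
#     reactor.run()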

File diff suppressed because it is too large

View file

@ -0,0 +1,248 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.epollreactor}.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import TestCase
try:
from twisted.internet.epollreactor import _ContinuousPolling
except ImportError:
_ContinuousPolling = None
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionDone
class Descriptor(object):
"""
Records reads and writes, as if it were a C{FileDescriptor}.
"""
def __init__(self):
self.events = []
def fileno(self):
return 1
def doRead(self):
self.events.append("read")
def doWrite(self):
self.events.append("write")
def connectionLost(self, reason):
reason.trap(ConnectionDone)
self.events.append("lost")
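# A brief sketch, not part of the patch, of the pattern the tests below rely
# on: _ContinuousPolling schedules a LoopingCall on the supplied clock and
# invokes doRead()/doWrite() on each registered descriptor when it fires.
# (Clock, _ContinuousPolling and Descriptor are the names defined or imported
# in this module.)
#
#     clock = Clock()
#     poller = _ContinuousPolling(clock)
#     desc = Descriptor()
#     poller.addReader(desc)
#     clock.advance(0.001)
#     desc.events    # ["read"]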
class ContinuousPollingTests(TestCase):
"""
L{_ContinuousPolling} can be used to read and write from C{FileDescriptor}
objects.
"""
def test_addReader(self):
"""
Adding a reader when there was previously no reader starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertIsNone(poller._loop)
reader = object()
self.assertFalse(poller.isReading(reader))
poller.addReader(reader)
self.assertIsNotNone(poller._loop)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isReading(reader))
def test_addWriter(self):
"""
Adding a writer when there was previously no writer starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertIsNone(poller._loop)
writer = object()
self.assertFalse(poller.isWriting(writer))
poller.addWriter(writer)
self.assertIsNotNone(poller._loop)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isWriting(writer))
def test_removeReader(self):
"""
Removing a reader stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
poller.removeReader(reader)
self.assertIsNone(poller._loop)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isReading(reader))
def test_removeWriter(self):
"""
Removing a writer stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
poller.removeWriter(writer)
self.assertIsNone(poller._loop)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isWriting(writer))
def test_removeUnknown(self):
"""
Removing unknown readers and writers silently does nothing.
"""
poller = _ContinuousPolling(Clock())
poller.removeWriter(object())
poller.removeReader(object())
def test_multipleReadersAndWriters(self):
"""
Adding multiple readers and writers results in a single
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertIsNotNone(poller._loop)
poller.addWriter(object())
self.assertIsNotNone(poller._loop)
poller.addReader(object())
self.assertIsNotNone(poller._loop)
poller.addReader(object())
poller.removeWriter(writer)
self.assertIsNotNone(poller._loop)
self.assertTrue(poller._loop.running)
self.assertEqual(len(poller._reactor.getDelayedCalls()), 1)
def test_readerPolling(self):
"""
        Adding a reader causes its C{doRead} to be called once every
        millisecond.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read", "read"])
def test_writerPolling(self):
"""
        Adding a writer causes its C{doWrite} to be called once every
        millisecond.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write", "write"])
def test_connectionLostOnRead(self):
"""
If a C{doRead} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doRead = lambda: ConnectionDone()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_connectionLostOnWrite(self):
"""
If a C{doWrite} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doWrite = lambda: ConnectionDone()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_removeAll(self):
"""
L{_ContinuousPolling.removeAll} removes all descriptors and returns
the readers and writers.
"""
poller = _ContinuousPolling(Clock())
reader = object()
writer = object()
both = object()
poller.addReader(reader)
poller.addReader(both)
poller.addWriter(writer)
poller.addWriter(both)
removed = poller.removeAll()
self.assertEqual(poller.getReaders(), [])
self.assertEqual(poller.getWriters(), [])
self.assertEqual(len(removed), 3)
self.assertEqual(set(removed), set([reader, writer, both]))
def test_getReaders(self):
"""
L{_ContinuousPolling.getReaders} returns a list of the read
descriptors.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
self.assertIn(reader, poller.getReaders())
def test_getWriters(self):
"""
L{_ContinuousPolling.getWriters} returns a list of the write
descriptors.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertIn(writer, poller.getWriters())
if _ContinuousPolling is None:
skip = "epoll not supported in this environment."

View file

@ -0,0 +1,41 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.error}
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet import error
class ConnectionAbortedTests(SynchronousTestCase):
"""
Tests for the L{twisted.internet.error.ConnectionAborted} exception.
"""
def test_str(self):
"""
        The default message of L{ConnectionAborted} is a sentence which points
        to L{ITCPTransport.abortConnection()}.
"""
self.assertEqual(
("Connection was aborted locally"
" using ITCPTransport.abortConnection."),
str(error.ConnectionAborted()),
)
def test_strArgs(self):
"""
Any arguments passed to L{ConnectionAborted} are included in its
message.
"""
self.assertEqual(
("Connection was aborted locally using"
" ITCPTransport.abortConnection:"
" foo bar."),
str(error.ConnectionAborted('foo', 'bar')),
)

View file

@ -0,0 +1,427 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorFDSet}.
"""
__metaclass__ = type
import os, socket, traceback
from zope.interface import implementer
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor
from twisted.internet.abstract import FileDescriptor
from twisted.internet.test.reactormixins import ReactorBuilder
# twisted.internet.tcp nicely defines some names with proper values on
# several different platforms.
from twisted.internet.tcp import EINPROGRESS, EWOULDBLOCK
def socketpair():
serverSocket = socket.socket()
serverSocket.bind(('127.0.0.1', 0))
serverSocket.listen(1)
try:
client = socket.socket()
try:
client.setblocking(False)
try:
client.connect(('127.0.0.1', serverSocket.getsockname()[1]))
except socket.error as e:
if e.args[0] not in (EINPROGRESS, EWOULDBLOCK):
raise
server, addr = serverSocket.accept()
except:
client.close()
raise
finally:
serverSocket.close()
return client, server
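# A short usage sketch, not in the original change: the helper above behaves
# like any connected TCP pair, so bytes written on one end become readable on
# the other (the client socket is non-blocking, so reads may need to wait for
# the data to arrive).
#
#     client, server = socketpair()
#     try:
#         server.sendall(b"x")
#     finally:
#         client.close()
#         server.close()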
class ReactorFDSetTestsBuilder(ReactorBuilder):
"""
Builder defining tests relating to L{IReactorFDSet}.
"""
requiredInterfaces = [IReactorFDSet]
def _connectedPair(self):
"""
Return the two sockets which make up a new TCP connection.
"""
client, server = socketpair()
self.addCleanup(client.close)
self.addCleanup(server.close)
return client, server
def _simpleSetup(self):
reactor = self.buildReactor()
client, server = self._connectedPair()
fd = FileDescriptor(reactor)
fd.fileno = client.fileno
return reactor, fd, server
def test_addReader(self):
"""
C{reactor.addReader()} accepts an L{IReadDescriptor} provider and calls
its C{doRead} method when there may be data available on its C{fileno}.
"""
reactor, fd, server = self._simpleSetup()
def removeAndStop():
reactor.removeReader(fd)
reactor.stop()
fd.doRead = removeAndStop
reactor.addReader(fd)
server.sendall(b'x')
# The reactor will only stop if it calls fd.doRead.
self.runReactor(reactor)
# Nothing to assert, just be glad we got this far.
def test_removeReader(self):
"""
L{reactor.removeReader()} accepts an L{IReadDescriptor} provider
previously passed to C{reactor.addReader()} and causes it to no longer
be monitored for input events.
"""
reactor, fd, server = self._simpleSetup()
def fail():
self.fail("doRead should not be called")
fd.doRead = fail
reactor.addReader(fd)
reactor.removeReader(fd)
server.sendall(b'x')
# Give the reactor two timed event passes to notice that there's I/O
# (if it is incorrectly watching for I/O).
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
self.runReactor(reactor)
        # Getting here probably means the right thing happened.
def test_addWriter(self):
"""
C{reactor.addWriter()} accepts an L{IWriteDescriptor} provider and
calls its C{doWrite} method when it may be possible to write to its
C{fileno}.
"""
reactor, fd, server = self._simpleSetup()
def removeAndStop():
reactor.removeWriter(fd)
reactor.stop()
fd.doWrite = removeAndStop
reactor.addWriter(fd)
self.runReactor(reactor)
# Getting here is great.
def _getFDTest(self, kind):
"""
Helper for getReaders and getWriters tests.
"""
reactor = self.buildReactor()
get = getattr(reactor, 'get' + kind + 's')
add = getattr(reactor, 'add' + kind)
remove = getattr(reactor, 'remove' + kind)
client, server = self._connectedPair()
self.assertNotIn(client, get())
self.assertNotIn(server, get())
add(client)
self.assertIn(client, get())
self.assertNotIn(server, get())
remove(client)
self.assertNotIn(client, get())
self.assertNotIn(server, get())
def test_getReaders(self):
"""
L{IReactorFDSet.getReaders} reflects the additions and removals made
with L{IReactorFDSet.addReader} and L{IReactorFDSet.removeReader}.
"""
self._getFDTest('Reader')
def test_removeWriter(self):
"""
L{reactor.removeWriter()} accepts an L{IWriteDescriptor} provider
previously passed to C{reactor.addWriter()} and causes it to no longer
be monitored for outputability.
"""
reactor, fd, server = self._simpleSetup()
def fail():
self.fail("doWrite should not be called")
fd.doWrite = fail
reactor.addWriter(fd)
reactor.removeWriter(fd)
# Give the reactor two timed event passes to notice that there's I/O
# (if it is incorrectly watching for I/O).
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
self.runReactor(reactor)
        # Getting here probably means the right thing happened.
def test_getWriters(self):
"""
L{IReactorFDSet.getWriters} reflects the additions and removals made
with L{IReactorFDSet.addWriter} and L{IReactorFDSet.removeWriter}.
"""
self._getFDTest('Writer')
def test_removeAll(self):
"""
C{reactor.removeAll()} removes all registered L{IReadDescriptor}
providers and all registered L{IWriteDescriptor} providers and returns
them.
"""
reactor = self.buildReactor()
reactor, fd, server = self._simpleSetup()
fd.doRead = lambda: self.fail("doRead should not be called")
fd.doWrite = lambda: self.fail("doWrite should not be called")
server.sendall(b'x')
reactor.addReader(fd)
reactor.addWriter(fd)
removed = reactor.removeAll()
# Give the reactor two timed event passes to notice that there's I/O
# (if it is incorrectly watching for I/O).
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
self.runReactor(reactor)
# Getting here means the right thing happened probably.
self.assertEqual(removed, [fd])
def test_removedFromReactor(self):
"""
A descriptor's C{fileno} method should not be called after the
descriptor has been removed from the reactor.
"""
reactor = self.buildReactor()
descriptor = RemovingDescriptor(reactor)
reactor.callWhenRunning(descriptor.start)
self.runReactor(reactor)
self.assertEqual(descriptor.calls, [])
def test_negativeOneFileDescriptor(self):
"""
If L{FileDescriptor.fileno} returns C{-1}, the descriptor is removed
from the reactor.
"""
reactor = self.buildReactor()
client, server = self._connectedPair()
class DisappearingDescriptor(FileDescriptor):
_fileno = server.fileno()
_received = b""
def fileno(self):
return self._fileno
def doRead(self):
self._fileno = -1
self._received += server.recv(1)
client.send(b'y')
def connectionLost(self, reason):
reactor.stop()
descriptor = DisappearingDescriptor(reactor)
reactor.addReader(descriptor)
client.send(b'x')
self.runReactor(reactor)
self.assertEqual(descriptor._received, b"x")
def test_lostFileDescriptor(self):
"""
The file descriptor underlying a FileDescriptor may be closed and
replaced by another at some point. Bytes which arrive on the new
descriptor must not be delivered to the FileDescriptor which was
originally registered with the original descriptor of the same number.
Practically speaking, this is difficult or impossible to detect. The
implementation relies on C{fileno} raising an exception if the original
descriptor has gone away. If C{fileno} continues to return the original
file descriptor value, the reactor may deliver events from that
descriptor. This is a best effort attempt to ease certain debugging
situations. Applications should not rely on it intentionally.
"""
reactor = self.buildReactor()
name = reactor.__class__.__name__
if name in ('EPollReactor', 'KQueueReactor', 'CFReactor',
'AsyncioSelectorReactor'):
# Closing a file descriptor immediately removes it from the epoll
# set without generating a notification. That means epollreactor
# will not call any methods on Victim after the close, so there's
# no chance to notice the socket is no longer valid.
raise SkipTest("%r cannot detect lost file descriptors" % (name,))
client, server = self._connectedPair()
class Victim(FileDescriptor):
"""
This L{FileDescriptor} will have its socket closed out from under it
and another socket will take its place. It will raise a
socket.error from C{fileno} after this happens (because socket
objects remember whether they have been closed), so as long as the
reactor calls the C{fileno} method the problem will be detected.
"""
def fileno(self):
return server.fileno()
def doRead(self):
raise Exception("Victim.doRead should never be called")
def connectionLost(self, reason):
"""
When the problem is detected, the reactor should disconnect this
file descriptor. When that happens, stop the reactor so the
test ends.
"""
reactor.stop()
reactor.addReader(Victim())
# Arrange for the socket to be replaced at some unspecified time.
# Significantly, this will not be while any I/O processing code is on
# the stack. It is something that happens independently and cannot be
# relied upon to happen at a convenient time, such as within a call to
# doRead.
def messItUp():
newC, newS = self._connectedPair()
fileno = server.fileno()
server.close()
os.dup2(newS.fileno(), fileno)
newC.send(b"x")
reactor.callLater(0, messItUp)
self.runReactor(reactor)
# If the implementation feels like logging the exception raised by
# MessedUp.fileno, that's fine.
self.flushLoggedErrors(socket.error)
if platform.isWindows():
test_lostFileDescriptor.skip = (
"Cannot duplicate socket filenos on Windows")
def test_connectionLostOnShutdown(self):
"""
Any file descriptors added to the reactor have their C{connectionLost}
called when C{reactor.stop} is called.
"""
reactor = self.buildReactor()
class DoNothingDescriptor(FileDescriptor):
def doRead(self):
return None
def doWrite(self):
return None
client, server = self._connectedPair()
fd1 = DoNothingDescriptor(reactor)
fd1.fileno = client.fileno
fd2 = DoNothingDescriptor(reactor)
fd2.fileno = server.fileno
reactor.addReader(fd1)
reactor.addWriter(fd2)
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertTrue(fd1.disconnected)
self.assertTrue(fd2.disconnected)
@implementer(IReadDescriptor)
class RemovingDescriptor(object):
"""
A read descriptor which removes itself from the reactor as soon as it
gets a chance to do a read and keeps track of when its own C{fileno}
method is called.
@ivar insideReactor: A flag which is true as long as the reactor has
this descriptor as a reader.
@ivar calls: A list of the bottom of the call stack for any call to
C{fileno} when C{insideReactor} is false.
"""
def __init__(self, reactor):
self.reactor = reactor
self.insideReactor = False
self.calls = []
self.read, self.write = socketpair()
def start(self):
self.insideReactor = True
self.reactor.addReader(self)
self.write.send(b'a')
def logPrefix(self):
return 'foo'
def doRead(self):
self.reactor.removeReader(self)
self.insideReactor = False
self.reactor.stop()
self.read.close()
self.write.close()
def fileno(self):
if not self.insideReactor:
self.calls.append(traceback.extract_stack(limit=5)[:-1])
return self.read.fileno()
def connectionLost(self, reason):
# Ideally we'd close the descriptors here... but actually
# connectionLost is never called because we remove ourselves from the
# reactor before it stops.
pass
globals().update(ReactorFDSetTestsBuilder.makeTestCaseClasses())

View file

@ -0,0 +1,99 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Whitebox tests for L{twisted.internet.abstract.FileDescriptor}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyClass
from twisted.internet.abstract import FileDescriptor
from twisted.internet.interfaces import IPushProducer
from twisted.trial.unittest import SynchronousTestCase
class MemoryFile(FileDescriptor):
"""
A L{FileDescriptor} customization which writes to a Python list in memory
with certain limitations.
@ivar _written: A C{list} of C{bytes} which have been accepted as written.
@ivar _freeSpace: A C{int} giving the number of bytes which will be accepted
by future writes.
"""
connected = True
def __init__(self):
FileDescriptor.__init__(self, reactor=object())
self._written = []
self._freeSpace = 0
def startWriting(self):
pass
def stopWriting(self):
pass
def writeSomeData(self, data):
"""
Copy at most C{self._freeSpace} bytes from C{data} into C{self._written}.
@return: A C{int} indicating how many bytes were copied from C{data}.
"""
acceptLength = min(self._freeSpace, len(data))
if acceptLength:
self._freeSpace -= acceptLength
self._written.append(data[:acceptLength])
return acceptLength
class FileDescriptorTests(SynchronousTestCase):
"""
Tests for L{FileDescriptor}.
"""
def test_writeWithUnicodeRaisesException(self):
"""
L{FileDescriptor.write} doesn't accept unicode data.
"""
fileDescriptor = FileDescriptor(reactor=object())
self.assertRaises(TypeError, fileDescriptor.write, u'foo')
def test_writeSequenceWithUnicodeRaisesException(self):
"""
L{FileDescriptor.writeSequence} doesn't accept unicode data.
"""
fileDescriptor = FileDescriptor(reactor=object())
self.assertRaises(
TypeError, fileDescriptor.writeSequence, [b'foo', u'bar', b'baz'])
def test_implementInterfaceIPushProducer(self):
"""
L{FileDescriptor} should implement L{IPushProducer}.
"""
self.assertTrue(verifyClass(IPushProducer, FileDescriptor))
class WriteDescriptorTests(SynchronousTestCase):
"""
Tests for L{FileDescriptor}'s implementation of L{IWriteDescriptor}.
"""
def test_kernelBufferFull(self):
"""
When L{FileDescriptor.writeSomeData} returns C{0} to indicate no more
data can be written immediately, L{FileDescriptor.doWrite} returns
L{None}.
"""
descriptor = MemoryFile()
descriptor.write(b"hello, world")
self.assertIsNone(descriptor.doWrite())
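    # A complementary sketch, not part of the patch: once _freeSpace allows it,
    # doWrite() drains the buffered bytes into _written via writeSomeData().
    #
    #     descriptor = MemoryFile()
    #     descriptor.write(b"hello, world")
    #     descriptor._freeSpace = 12
    #     descriptor.doWrite()
    #     descriptor._written    # [b"hello, world"]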

View file

@ -0,0 +1,257 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
GI/GTK3 reactor tests.
"""
from __future__ import division, absolute_import, print_function
import sys, os
try:
from twisted.internet import gireactor
from gi.repository import Gio
except ImportError:
gireactor = None
gtk3reactor = None
else:
# gtk3reactor may be unavailable even if gireactor is available; in
# particular in pygobject 3.4/gtk 3.6, when no X11 DISPLAY is found.
try:
from twisted.internet import gtk3reactor
except ImportError:
gtk3reactor = None
else:
from gi.repository import Gtk
from twisted.python.filepath import FilePath
from twisted.python.runtime import platform
from twisted.internet.defer import Deferred
from twisted.internet.error import ReactorAlreadyRunning
from twisted.internet.protocol import ProcessProtocol
from twisted.trial.unittest import TestCase, SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.test.test_twisted import SetAsideModule
from twisted.internet.interfaces import IReactorProcess
from twisted.python.compat import _PY3
# Skip all tests if gi is unavailable:
if gireactor is None:
skip = "gtk3/gi not importable"
class GApplicationRegistrationTests(ReactorBuilder, TestCase):
"""
GtkApplication and GApplication are supported by
L{twisted.internet.gtk3reactor} and L{twisted.internet.gireactor}.
We inherit from L{ReactorBuilder} in order to use some of its
reactor-running infrastructure, but don't need its test-creation
functionality.
"""
def runReactor(self, app, reactor):
"""
        Register the app, run the reactor, and make sure that the app was
        activated, that the reactor was running at the time, and that the
        reactor can be stopped.
"""
if not hasattr(app, "quit"):
raise SkipTest("Version of PyGObject is too old.")
result = []
def stop():
result.append("stopped")
reactor.stop()
def activate(widget):
result.append("activated")
reactor.callLater(0, stop)
app.connect('activate', activate)
# We want reactor.stop() to *always* stop the event loop, even if
# someone has called hold() on the application and never done the
# corresponding release() -- for more details see
# http://developer.gnome.org/gio/unstable/GApplication.html.
app.hold()
reactor.registerGApplication(app)
ReactorBuilder.runReactor(self, reactor)
self.assertEqual(result, ["activated", "stopped"])
def test_gApplicationActivate(self):
"""
L{Gio.Application} instances can be registered with a gireactor.
"""
reactor = gireactor.GIReactor(useGtk=False)
self.addCleanup(self.unbuildReactor, reactor)
app = Gio.Application(
application_id='com.twistedmatrix.trial.gireactor',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.runReactor(app, reactor)
def test_gtkApplicationActivate(self):
"""
L{Gtk.Application} instances can be registered with a gtk3reactor.
"""
reactor = gtk3reactor.Gtk3Reactor()
self.addCleanup(self.unbuildReactor, reactor)
app = Gtk.Application(
application_id='com.twistedmatrix.trial.gtk3reactor',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.runReactor(app, reactor)
if gtk3reactor is None:
test_gtkApplicationActivate.skip = (
"Gtk unavailable (may require running with X11 DISPLAY env set)")
def test_portable(self):
"""
L{gireactor.PortableGIReactor} doesn't support application
registration at this time.
"""
reactor = gireactor.PortableGIReactor()
self.addCleanup(self.unbuildReactor, reactor)
app = Gio.Application(
application_id='com.twistedmatrix.trial.gireactor',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.assertRaises(NotImplementedError,
reactor.registerGApplication, app)
def test_noQuit(self):
"""
Older versions of PyGObject lack C{Application.quit}, and so won't
allow registration.
"""
reactor = gireactor.GIReactor(useGtk=False)
self.addCleanup(self.unbuildReactor, reactor)
# An app with no "quit" method:
app = object()
exc = self.assertRaises(RuntimeError, reactor.registerGApplication, app)
self.assertTrue(exc.args[0].startswith(
"Application registration is not"))
def test_cantRegisterAfterRun(self):
"""
It is not possible to register a C{Application} after the reactor has
already started.
"""
reactor = gireactor.GIReactor(useGtk=False)
self.addCleanup(self.unbuildReactor, reactor)
app = Gio.Application(
application_id='com.twistedmatrix.trial.gireactor',
flags=Gio.ApplicationFlags.FLAGS_NONE)
def tryRegister():
exc = self.assertRaises(ReactorAlreadyRunning,
reactor.registerGApplication, app)
self.assertEqual(exc.args[0],
"Can't register application after reactor was started.")
reactor.stop()
reactor.callLater(0, tryRegister)
ReactorBuilder.runReactor(self, reactor)
def test_cantRegisterTwice(self):
"""
It is not possible to register more than one C{Application}.
"""
reactor = gireactor.GIReactor(useGtk=False)
self.addCleanup(self.unbuildReactor, reactor)
app = Gio.Application(
application_id='com.twistedmatrix.trial.gireactor',
flags=Gio.ApplicationFlags.FLAGS_NONE)
reactor.registerGApplication(app)
app2 = Gio.Application(
application_id='com.twistedmatrix.trial.gireactor2',
flags=Gio.ApplicationFlags.FLAGS_NONE)
exc = self.assertRaises(RuntimeError,
reactor.registerGApplication, app2)
self.assertEqual(exc.args[0],
"Can't register more than one application instance.")
class PygtkCompatibilityTests(TestCase):
"""
pygtk imports are either prevented, or a compatibility layer is used if
possible.
"""
def test_noCompatibilityLayer(self):
"""
If no compatibility layer is present, imports of gobject and friends
are disallowed.
We do this by running a process where we make sure gi.pygtkcompat
isn't present.
"""
if _PY3:
raise SkipTest("Python3 always has the compatibility layer.")
from twisted.internet import reactor
if not IReactorProcess.providedBy(reactor):
raise SkipTest("No process support available in this reactor.")
result = Deferred()
class Stdout(ProcessProtocol):
data = b""
def errReceived(self, err):
print(err)
def outReceived(self, data):
self.data += data
def processExited(self, reason):
result.callback(self.data)
path = FilePath(__file__).sibling(b"process_gireactornocompat.py").path
pyExe = FilePath(sys.executable)._asBytesPath()
# Pass in a PYTHONPATH that is the test runner's sys.path, to make sure
# we're running from a checkout
reactor.spawnProcess(Stdout(), pyExe, [pyExe, path],
env={"PYTHONPATH": ":".join(sys.path)})
result.addCallback(self.assertEqual, b"success")
return result
def test_compatibilityLayer(self):
"""
If the compatibility layer is present, importing gobject uses the gi
compatibility layer.
"""
if "gi.pygtkcompat" not in sys.modules:
raise SkipTest("This version of gi doesn't include pygtkcompat.")
import gobject
self.assertTrue(gobject.__name__.startswith("gi."))
class Gtk3ReactorTests(TestCase):
"""
Tests for L{gtk3reactor}.
"""
def test_requiresDISPLAY(self):
"""
On X11, L{gtk3reactor} is unimportable if the C{DISPLAY} environment
variable is not set.
"""
display = os.environ.get("DISPLAY", None)
if display is not None:
self.addCleanup(os.environ.__setitem__, "DISPLAY", display)
del os.environ["DISPLAY"]
with SetAsideModule("twisted.internet.gtk3reactor"):
exc = self.assertRaises(ImportError,
__import__, "twisted.internet.gtk3reactor")
self.assertEqual(
exc.args[0],
"Gtk3 requires X11, and no DISPLAY environment variable is set")
if platform.getType() != "posix" or platform.isMacOSX():
test_requiresDISPLAY.skip = "This test is only relevant when using X11"
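# Illustrative sketch (not part of the test module) of the registration API
# exercised above: drive a Gio.Application's lifecycle from the gi reactor.
# The application id below is hypothetical, and a recent pygobject (with
# Application.quit support) is assumed.
def _exampleRegisterGApplication():
    from gi.repository import Gio
    from twisted.internet import gireactor
    reactor = gireactor.GIReactor(useGtk=False)
    app = Gio.Application(
        application_id='com.example.demo',
        flags=Gio.ApplicationFlags.FLAGS_NONE)
    # 'activate' fires once the application has been registered and started.
    app.connect('activate', lambda widget: reactor.callLater(0, reactor.stop))
    # hold() keeps the application alive so reactor.stop() always wins.
    app.hold()
    reactor.registerGApplication(app)
    reactor.run()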

View file

@ -0,0 +1,68 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.internet.glibbase.
"""
from __future__ import division, absolute_import
import sys
from twisted.trial.unittest import TestCase
from twisted.internet._glibbase import ensureNotImported
class EnsureNotImportedTests(TestCase):
"""
L{ensureNotImported} protects against unwanted past and future imports.
"""
def test_ensureWhenNotImported(self):
"""
If the specified modules have never been imported, and import
prevention is requested, L{ensureNotImported} makes sure they will not
be imported in the future.
"""
modules = {}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.",
preventImports=["m1", "m2", "m3"])
self.assertEqual(modules, {"m1": None, "m2": None, "m3": None})
def test_ensureWhenNotImportedDontPrevent(self):
"""
If the specified modules have never been imported, and import
prevention is not requested, L{ensureNotImported} has no effect.
"""
modules = {}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.")
self.assertEqual(modules, {})
def test_ensureWhenFailedToImport(self):
"""
If the specified modules have been set to L{None} in C{sys.modules},
L{ensureNotImported} does not complain.
"""
modules = {"m2": None}
self.patch(sys, "modules", modules)
ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2"])
self.assertEqual(modules, {"m1": None, "m2": None})
def test_ensureFailsWhenImported(self):
"""
If one of the specified modules has been previously imported,
L{ensureNotImported} raises an exception.
"""
module = object()
modules = {"m2": module}
self.patch(sys, "modules", modules)
e = self.assertRaises(ImportError, ensureNotImported,
["m1", "m2"], "A message.",
preventImports=["m1", "m2"])
self.assertEqual(modules, {"m2": module})
self.assertEqual(e.args, ("A message.",))
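# Illustrative sketch (not part of the test module) of how ensureNotImported
# is typically used: refuse to start if the static bindings were already
# imported, and block them from being imported later.  The module names and
# message below mirror what the gi-based reactors guard against; treat the
# exact wording as an assumption.
def _exampleEnsureNotImported():
    ensureNotImported(
        ["gobject", "gtk"],
        "Introspected and static GObject/GTK bindings must not be mixed.",
        preventImports=["gobject", "gtk"])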

View file

@ -0,0 +1,384 @@
# -*- test-case-name: twisted.internet.test.test_inlinecb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.defer.inlineCallbacks}.
Some tests for inlineCallbacks are defined in L{twisted.test.test_defgen} as
well.
"""
from __future__ import division, absolute_import
import sys
from twisted.trial.unittest import TestCase, SynchronousTestCase
from twisted.internet.defer import (
Deferred, returnValue, inlineCallbacks, CancelledError)
class StopIterationReturnTests(TestCase):
"""
On Python 3.4 and newer generator functions may use the C{return} statement
with a value, which is attached to the L{StopIteration} exception that is
raised.
L{inlineCallbacks} will use this value when it fires the C{callback}.
"""
def test_returnWithValue(self):
"""
If the C{return} statement has a value it is propagated back to the
L{Deferred} that the C{inlineCallbacks} function returned.
"""
environ = {"inlineCallbacks": inlineCallbacks}
exec("""
@inlineCallbacks
def f(d):
yield d
return 14
""", environ)
d1 = Deferred()
d2 = environ["f"](d1)
d1.callback(None)
self.assertEqual(self.successResultOf(d2), 14)
if sys.version_info < (3, 4):
StopIterationReturnTests.skip = "Test requires Python 3.4 or greater"
class NonLocalExitTests(TestCase):
"""
It's possible for L{returnValue} to be (accidentally) invoked at a stack
level below the L{inlineCallbacks}-decorated function which it is exiting.
If this happens, L{returnValue} should report useful errors.
If L{returnValue} is invoked from a function not decorated by
L{inlineCallbacks}, it will emit a warning if it causes an
L{inlineCallbacks} function further up the stack to exit.
"""
def mistakenMethod(self):
"""
This method mistakenly invokes L{returnValue}, despite the fact that it
is not decorated with L{inlineCallbacks}.
"""
returnValue(1)
def assertMistakenMethodWarning(self, resultList):
"""
Flush the current warnings and assert that we have been told that
C{mistakenMethod} was invoked, and that the result from the Deferred
that was fired (appended to the given list) is C{mistakenMethod}'s
result. The warning should indicate that an inlineCallbacks function
called 'inline' was made to exit.
"""
self.assertEqual(resultList, [1])
warnings = self.flushWarnings(offendingFunctions=[self.mistakenMethod])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"returnValue() in 'mistakenMethod' causing 'inline' to exit: "
"returnValue should only be invoked by functions decorated with "
"inlineCallbacks")
def test_returnValueNonLocalWarning(self):
"""
L{returnValue} will emit a non-local exit warning in the simplest case,
where the offending function is invoked immediately.
"""
@inlineCallbacks
def inline():
self.mistakenMethod()
returnValue(2)
yield 0
d = inline()
results = []
d.addCallback(results.append)
self.assertMistakenMethodWarning(results)
def test_returnValueNonLocalDeferred(self):
"""
L{returnValue} will emit a non-local warning in the case where the
L{inlineCallbacks}-decorated function has already yielded a Deferred
and therefore moved its generator function along.
"""
cause = Deferred()
@inlineCallbacks
def inline():
yield cause
self.mistakenMethod()
returnValue(2)
effect = inline()
results = []
effect.addCallback(results.append)
self.assertEqual(results, [])
cause.callback(1)
self.assertMistakenMethodWarning(results)
class ForwardTraceBackTests(SynchronousTestCase):
def test_forwardTracebacks(self):
"""
Chained inlineCallbacks forward traceback information from generator to
generator.
This is a first, simple test with a couple of inline callbacks.
"""
@inlineCallbacks
def erroring():
yield "forcing generator"
raise Exception('Error Marker')
@inlineCallbacks
def calling():
yield erroring()
d = calling()
f = self.failureResultOf(d)
tb = f.getTraceback()
self.assertIn("in erroring", tb)
self.assertIn("in calling", tb)
self.assertIn("Error Marker", tb)
def test_forwardLotsOfTracebacks(self):
"""
Several chained inlineCallbacks give information about all generators.
A wider test with four chained inline callbacks.
The application stack-trace should be reported, and implementation details
like "throwExceptionIntoGenerator" symbols are omitted from the stack.
Note that the previous test covers the simple case, and this one covers
the deep recursion case.
That case needs specific code in failure.py to accommodate the stack
breakage introduced by throwExceptionIntoGenerator.
Hence we keep both tests in order to pinpoint where a regression might
have been introduced.
"""
@inlineCallbacks
def erroring():
yield "forcing generator"
raise Exception('Error Marker')
@inlineCallbacks
def calling3():
yield erroring()
@inlineCallbacks
def calling2():
yield calling3()
@inlineCallbacks
def calling():
yield calling2()
d = calling()
f = self.failureResultOf(d)
tb = f.getTraceback()
self.assertIn("in erroring", tb)
self.assertIn("in calling", tb)
self.assertIn("in calling2", tb)
self.assertIn("in calling3", tb)
self.assertNotIn("throwExceptionIntoGenerator", tb)
self.assertIn("Error Marker", tb)
self.assertIn("in erroring", f.getTraceback())
class UntranslatedError(Exception):
"""
Untranslated exception type when testing an exception translation.
"""
class TranslatedError(Exception):
"""
Translated exception type when testing an exception translation.
"""
class DontFail(Exception):
"""
Sample exception type.
"""
def __init__(self, actual):
Exception.__init__(self)
self.actualValue = actual
class CancellationTests(SynchronousTestCase):
"""
Tests for cancellation of L{Deferred}s returned by L{inlineCallbacks}.
For each of these tests, let:
- C{G} be a generator decorated with C{inlineCallbacks}
- C{D} be a L{Deferred} returned by C{G}
- C{C} be a L{Deferred} awaited by C{G} with C{yield}
"""
def setUp(self):
"""
Set up the list of outstanding L{Deferred}s.
"""
self.deferredsOutstanding = []
def tearDown(self):
"""
If any L{Deferred}s are still outstanding, fire them.
"""
while self.deferredsOutstanding:
self.deferredGotten()
@inlineCallbacks
def sampleInlineCB(self, getChildDeferred=None):
"""
Generator for testing cascade cancelling cases.
@param getChildDeferred: Some callable returning a L{Deferred} that we
await (with C{yield}).
"""
if getChildDeferred is None:
getChildDeferred = self.getDeferred
try:
x = yield getChildDeferred()
except UntranslatedError:
raise TranslatedError()
except DontFail as df:
x = df.actualValue - 2
returnValue(x + 1)
def getDeferred(self):
"""
A sample function that returns a L{Deferred} that can be fired on
demand, by L{CancellationTests.deferredGotten}.
@return: L{Deferred} that can be fired on demand.
"""
self.deferredsOutstanding.append(Deferred())
return self.deferredsOutstanding[-1]
def deferredGotten(self, result=None):
"""
Fire the L{Deferred} returned from the least-recent call to
L{CancellationTests.getDeferred}.
@param result: result object to be used when firing the L{Deferred}.
"""
self.deferredsOutstanding.pop(0).callback(result)
def test_cascadeCancellingOnCancel(self):
"""
When C{D} is cancelled, C{C} is immediately cancelled too.
"""
childResultHolder = ['FAILURE']
def getChildDeferred():
d = Deferred()
def _eb(result):
childResultHolder[0] = result.check(CancelledError)
return result
d.addErrback(_eb)
return d
d = self.sampleInlineCB(getChildDeferred=getChildDeferred)
d.addErrback(lambda result: None)
d.cancel()
self.assertEqual(
childResultHolder[0],
CancelledError,
"no cascade cancelling occurs",
)
def test_errbackCancelledErrorOnCancel(self):
"""
When C{D} is cancelled, the L{CancelledError} from C{C} is errbacked
through C{D}.
"""
d = self.sampleInlineCB()
d.cancel()
self.assertRaises(
CancelledError,
self.failureResultOf(d).raiseException,
)
def test_errorToErrorTranslation(self):
"""
When C{D} is cancelled, and C{C} raises a particular type of error, C{G}
may catch that error at the point of yielding and translate it into
a different error which may be received by application code.
"""
def cancel(it):
it.errback(UntranslatedError())
a = Deferred(cancel)
d = self.sampleInlineCB(lambda: a)
d.cancel()
self.assertRaises(
TranslatedError,
self.failureResultOf(d).raiseException,
)
def test_errorToSuccessTranslation(self):
"""
When C{D} is cancelled, and C{C} raises a particular type of error,
C{G} may catch that error at the point of yielding and translate it
into a result value which may be received by application code.
"""
def cancel(it):
it.errback(DontFail(4321))
a = Deferred(cancel)
d = self.sampleInlineCB(lambda: a)
results = []
d.addCallback(results.append)
d.cancel()
self.assertEqual(results, [4320])
def test_asynchronousCancellation(self):
"""
When C{D} is cancelled, it won't reach the callbacks added to it by
application code until C{C} reaches the point in its callback chain
where C{G} awaits it. Otherwise, application code won't be able to
track resource usage that C{D} may be using.
"""
moreDeferred = Deferred()
def deferMeMore(result):
result.trap(CancelledError)
return moreDeferred
def deferMe():
d = Deferred()
d.addErrback(deferMeMore)
return d
d = self.sampleInlineCB(getChildDeferred=deferMe)
d.cancel()
self.assertNoResult(d)
moreDeferred.callback(6543)
self.assertEqual(self.successResultOf(d), 6544)
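# Illustrative sketch (not part of the test suite) of the cancellation
# behaviour checked above: cancelling D (the Deferred returned by the
# decorated generator G) cancels C (the Deferred G is waiting on), and G may
# translate the resulting CancelledError into an ordinary result.
@inlineCallbacks
def _exampleCancellableGenerator(childDeferred):
    try:
        yield childDeferred           # C: the Deferred G is waiting on
    except CancelledError:
        returnValue("cancelled")      # turn the cancellation into a result


def _exampleCancel():
    c = Deferred()
    d = _exampleCancellableGenerator(c)   # D: the Deferred returned by G
    d.cancel()                            # cancels C, which fires D
    return d                              # fires with "cancelled"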

View file

@ -0,0 +1,503 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the inotify wrapper in L{twisted.internet.inotify}.
"""
import sys
from twisted.internet import defer, reactor
from twisted.python import filepath, runtime
from twisted.python.reflect import requireModule
from twisted.trial import unittest
if requireModule('twisted.python._inotify') is not None:
from twisted.internet import inotify
else:
inotify = None
class INotifyTests(unittest.TestCase):
"""
Define all the tests for the basic functionality exposed by
L{inotify.INotify}.
"""
if not runtime.platform.supportsINotify():
skip = "This platform doesn't support INotify."
def setUp(self):
self.dirname = filepath.FilePath(self.mktemp())
self.dirname.createDirectory()
self.inotify = inotify.INotify()
self.inotify.startReading()
self.addCleanup(self.inotify.loseConnection)
def test_initializationErrors(self):
"""
L{inotify.INotify} raises L{inotify.INotifyError} when initialized in an
environment that doesn't support inotify as we expect it.
We simply force an exception to be raised for every possible case in
the for loop in L{inotify.INotify._inotify__init__}.
"""
class FakeINotify:
def init(self):
raise inotify.INotifyError()
self.patch(inotify.INotify, '_inotify', FakeINotify())
self.assertRaises(inotify.INotifyError, inotify.INotify)
def _notificationTest(self, mask, operation, expectedPath=None):
"""
Test notification from some filesystem operation.
@param mask: The event mask to use when setting up the watch.
@param operation: A function which will be called with the
name of a file in the watched directory and which should
trigger the event.
@param expectedPath: Optionally, the name of the path which is
expected to come back in the notification event; this will
also be passed to C{operation} (primarily useful when the
operation is being done to the directory itself, not a
file in it).
@return: A L{Deferred} which fires successfully when the
expected event has been received or fails otherwise.
"""
if expectedPath is None:
expectedPath = self.dirname.child("foo.bar")
notified = defer.Deferred()
def cbNotified(result):
(watch, filename, events) = result
self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode())
self.assertTrue(events & mask)
notified.addCallback(cbNotified)
self.inotify.watch(
self.dirname, mask=mask,
callbacks=[lambda *args: notified.callback(args)])
operation(expectedPath)
return notified
def test_access(self):
"""
Reading from a file in a monitored directory sends an
C{inotify.IN_ACCESS} event to the callback.
"""
def operation(path):
path.setContent(b"foo")
path.getContent()
return self._notificationTest(inotify.IN_ACCESS, operation)
def test_modify(self):
"""
Writing to a file in a monitored directory sends an
C{inotify.IN_MODIFY} event to the callback.
"""
def operation(path):
with path.open("w") as fObj:
fObj.write(b'foo')
return self._notificationTest(inotify.IN_MODIFY, operation)
def test_attrib(self):
"""
Changing the metadata of a file in a monitored directory
sends an C{inotify.IN_ATTRIB} event to the callback.
"""
def operation(path):
path.touch()
path.touch()
return self._notificationTest(inotify.IN_ATTRIB, operation)
def test_closeWrite(self):
"""
Closing a file which was open for writing in a monitored
directory sends an C{inotify.IN_CLOSE_WRITE} event to the
callback.
"""
def operation(path):
path.open("w").close()
return self._notificationTest(inotify.IN_CLOSE_WRITE, operation)
def test_closeNoWrite(self):
"""
Closing a file which was open for reading but not writing in a
monitored directory sends an C{inotify.IN_CLOSE_NOWRITE} event
to the callback.
"""
def operation(path):
path.touch()
path.open("r").close()
return self._notificationTest(inotify.IN_CLOSE_NOWRITE, operation)
def test_open(self):
"""
Opening a file in a monitored directory sends an
C{inotify.IN_OPEN} event to the callback.
"""
def operation(path):
path.open("w").close()
return self._notificationTest(inotify.IN_OPEN, operation)
def test_movedFrom(self):
"""
Moving a file out of a monitored directory sends an
C{inotify.IN_MOVED_FROM} event to the callback.
"""
def operation(path):
path.open("w").close()
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(inotify.IN_MOVED_FROM, operation)
def test_movedTo(self):
"""
Moving a file into a monitored directory sends an
C{inotify.IN_MOVED_TO} event to the callback.
"""
def operation(path):
p = filepath.FilePath(self.mktemp())
p.touch()
p.moveTo(path)
return self._notificationTest(inotify.IN_MOVED_TO, operation)
def test_create(self):
"""
Creating a file in a monitored directory sends an
C{inotify.IN_CREATE} event to the callback.
"""
def operation(path):
path.open("w").close()
return self._notificationTest(inotify.IN_CREATE, operation)
def test_delete(self):
"""
Deleting a file in a monitored directory sends an
C{inotify.IN_DELETE} event to the callback.
"""
def operation(path):
path.touch()
path.remove()
return self._notificationTest(inotify.IN_DELETE, operation)
def test_deleteSelf(self):
"""
Deleting the monitored directory itself sends an
C{inotify.IN_DELETE_SELF} event to the callback.
"""
def operation(path):
path.remove()
return self._notificationTest(
inotify.IN_DELETE_SELF, operation, expectedPath=self.dirname)
def test_moveSelf(self):
"""
Renaming the monitored directory itself sends an
C{inotify.IN_MOVE_SELF} event to the callback.
"""
def operation(path):
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(
inotify.IN_MOVE_SELF, operation, expectedPath=self.dirname)
def test_simpleSubdirectoryAutoAdd(self):
"""
L{inotify.INotify}, when initialized with autoAdd==True, also adds
newly created subdirectories to the watchlist.
"""
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_simpleDeleteDirectory(self):
"""
L{inotify.INotify} removes a directory from the watchlist when
it's removed from the filesystem.
"""
calls = []
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
subdir.remove()
except Exception:
d.errback()
def _eb():
# second call, we have just removed the subdir
try:
self.assertFalse(self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
if not calls:
# first call, it's the create subdir
calls.append(filename)
reactor.callLater(0, _)
else:
reactor.callLater(0, _eb)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_ignoreDirectory(self):
"""
L{inotify.INotify.ignore} removes a directory from the watchlist
"""
self.inotify.watch(self.dirname, autoAdd=True)
self.assertTrue(self.inotify._isWatched(self.dirname))
self.inotify.ignore(self.dirname)
self.assertFalse(self.inotify._isWatched(self.dirname))
def test_humanReadableMask(self):
"""
L{inotify.humanReadableMask} translates all the possible event
masks to a human readable string.
"""
for mask, value in inotify._FLAG_TO_HUMAN:
self.assertEqual(inotify.humanReadableMask(mask)[0], value)
checkMask = (
inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN)
self.assertEqual(
set(inotify.humanReadableMask(checkMask)),
set(['close_write', 'access', 'open']))
def test_recursiveWatch(self):
"""
L{inotify.INotify.watch} with recursive==True will add all the
subdirectories under the given path to the watchlist.
"""
subdir = self.dirname.child('test')
subdir2 = subdir.child('test2')
subdir3 = subdir2.child('test3')
subdir3.makedirs()
dirs = [subdir, subdir2, subdir3]
self.inotify.watch(self.dirname, recursive=True)
# let's even call this twice so that we test that nothing breaks
self.inotify.watch(self.dirname, recursive=True)
for d in dirs:
self.assertTrue(self.inotify._isWatched(d))
def test_connectionLostError(self):
"""
If there is a problem while closing the fd, L{inotify.INotify.connectionLost}
should not raise the exception but should log the error.
"""
import os
in_ = inotify.INotify()
os.close(in_._fd)
in_.loseConnection()
self.flushLoggedErrors()
def test_noAutoAddSubdirectory(self):
"""
L{inotify.INotify.watch} with autoAdd==False will stop inotify
from watching subdirectories created under the watched one.
"""
def _callback(wp, fp, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertFalse(self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=False,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_seriesOfWatchAndIgnore(self):
"""
L{inotify.INotify} will watch a filepath for events even if the same
path is repeatedly added/removed/re-added to the watchpoints.
"""
expectedPath = self.dirname.child("foo.bar2")
expectedPath.touch()
notified = defer.Deferred()
def cbNotified(result):
(ignored, filename, events) = result
self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode())
self.assertTrue(events & inotify.IN_DELETE_SELF)
def callIt(*args):
notified.callback(args)
# Watch, ignore, watch again to get into the state being tested.
self.assertTrue(self.inotify.watch(expectedPath, callbacks=[callIt]))
self.inotify.ignore(expectedPath)
self.assertTrue(
self.inotify.watch(
expectedPath, mask=inotify.IN_DELETE_SELF, callbacks=[callIt]))
notified.addCallback(cbNotified)
# Apparently in kernel version < 2.6.25, inotify has a bug in the way
# similar events are coalesced. So, be sure to generate a different
# event here than the touch() at the top of this method might have
# generated.
expectedPath.remove()
return notified
def test_ignoreFilePath(self):
"""
L{inotify.INotify} will ignore a filepath after it has been removed from
the watch list.
"""
expectedPath = self.dirname.child("foo.bar2")
expectedPath.touch()
expectedPath2 = self.dirname.child("foo.bar3")
expectedPath2.touch()
notified = defer.Deferred()
def cbNotified(result):
(ignored, filename, events) = result
self.assertEqual(filename.asBytesMode(), expectedPath2.asBytesMode())
self.assertTrue(events & inotify.IN_DELETE_SELF)
def callIt(*args):
notified.callback(args)
self.assertTrue(
self.inotify.watch(
expectedPath, inotify.IN_DELETE_SELF, callbacks=[callIt]))
notified.addCallback(cbNotified)
self.assertTrue(
self.inotify.watch(
expectedPath2, inotify.IN_DELETE_SELF, callbacks=[callIt]))
self.inotify.ignore(expectedPath)
expectedPath.remove()
expectedPath2.remove()
return notified
def test_ignoreNonWatchedFile(self):
"""
L{inotify.INotify} will raise KeyError if a non-watched filepath is
ignored.
"""
expectedPath = self.dirname.child("foo.ignored")
expectedPath.touch()
self.assertRaises(KeyError, self.inotify.ignore, expectedPath)
def test_complexSubdirectoryAutoAdd(self):
"""
L{inotify.INotify} with autoAdd==True for a watched path
generates events for every file or directory already present
in a newly created subdirectory under the watched one.
This tests that we solve a race condition in inotify even though
we may generate duplicate events.
"""
calls = set()
def _callback(wp, filename, mask):
calls.add(filename)
if len(calls) == 6:
try:
self.assertTrue(self.inotify._isWatched(subdir))
self.assertTrue(self.inotify._isWatched(subdir2))
self.assertTrue(self.inotify._isWatched(subdir3))
created = someFiles + [subdir, subdir2, subdir3]
created = {f.asBytesMode() for f in created}
self.assertEqual(len(calls), len(created))
self.assertEqual(calls, created)
except Exception:
d.errback()
else:
d.callback(None)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
subdir2 = subdir.child('test2')
subdir3 = subdir2.child('test3')
d = defer.Deferred()
subdir3.makedirs()
someFiles = [subdir.child('file1.dat'),
subdir2.child('file2.dat'),
subdir3.child('file3.dat')]
# Add some files in pretty much all the directories so that we
# see that we process all of them.
for i, filename in enumerate(someFiles):
filename.setContent(
filename.path.encode(sys.getfilesystemencoding()))
return d
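# Illustrative sketch (not part of the test suite) of the INotify API used
# throughout these tests: watch a directory and report each event.  The
# watched path below is hypothetical, and a running reactor on an
# inotify-capable platform is assumed.
def _exampleWatch():
    def onChange(watch, path, mask):
        print(path.path, inotify.humanReadableMask(mask))
    notifier = inotify.INotify()
    notifier.startReading()
    notifier.watch(filepath.FilePath("/tmp/watched"),
                   mask=inotify.IN_CREATE | inotify.IN_DELETE,
                   callbacks=[onChange])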

View file

@ -0,0 +1,150 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.iocpreactor}.
"""
import errno
from array import array
from struct import pack
from socket import AF_INET6, AF_INET, SOCK_STREAM, SOL_SOCKET, error, socket
from zope.interface.verify import verifyClass
from twisted.trial import unittest
from twisted.python.log import msg
from twisted.internet.interfaces import IPushProducer
try:
from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
from twisted.internet.iocpreactor.reactor import IOCPReactor, EVENTS_PER_LOOP, KEY_NORMAL
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.abstract import FileHandle
except ImportError:
skip = 'This test only applies to IOCPReactor'
try:
socket(AF_INET6, SOCK_STREAM).close()
except error as e:
ipv6Skip = str(e)
else:
ipv6Skip = None
class SupportTests(unittest.TestCase):
"""
Tests for L{twisted.internet.iocpreactor.iocpsupport}, low-level reactor
implementation helpers.
"""
def _acceptAddressTest(self, family, localhost):
"""
Create a C{SOCK_STREAM} connection to localhost using a socket with an
address family of C{family} and assert that the result of
L{iocpsupport.get_accept_addrs} is consistent with the result of
C{socket.getsockname} and C{socket.getpeername}.
"""
msg("family = %r" % (family,))
port = socket(family, SOCK_STREAM)
self.addCleanup(port.close)
port.bind(('', 0))
port.listen(1)
client = socket(family, SOCK_STREAM)
self.addCleanup(client.close)
client.setblocking(False)
try:
client.connect((localhost, port.getsockname()[1]))
except error as e:
self.assertIn(e.errno, (errno.EINPROGRESS, errno.EWOULDBLOCK))
server = socket(family, SOCK_STREAM)
self.addCleanup(server.close)
buff = array('B', b'\0' * 256)
self.assertEqual(
0, _iocp.accept(port.fileno(), server.fileno(), buff, None))
server.setsockopt(
SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, pack('P', port.fileno()))
self.assertEqual(
(family, client.getpeername()[:2], client.getsockname()[:2]),
_iocp.get_accept_addrs(server.fileno(), buff))
def test_ipv4AcceptAddress(self):
"""
L{iocpsupport.get_accept_addrs} returns a three-tuple of address
information about the socket associated with the file descriptor passed
to it. For a connection using IPv4:
- the first element is C{AF_INET}
- the second element is a two-tuple of a dotted decimal notation IPv4
address and a port number giving the peer address of the connection
- the third element is the same type giving the host address of the
connection
"""
self._acceptAddressTest(AF_INET, '127.0.0.1')
def test_ipv6AcceptAddress(self):
"""
Like L{test_ipv4AcceptAddress}, but for IPv6 connections. In this case:
- the first element is C{AF_INET6}
- the second element is a two-tuple of a hexadecimal IPv6 address
literal and a port number giving the peer address of the connection
- the third element is the same type giving the host address of the
connection
"""
self._acceptAddressTest(AF_INET6, '::1')
if ipv6Skip is not None:
test_ipv6AcceptAddress.skip = ipv6Skip
class IOCPReactorTests(unittest.TestCase):
def test_noPendingTimerEvents(self):
"""
Test reactor behavior (doIteration) when there are no pending timer
events.
"""
ir = IOCPReactor()
ir.wakeUp()
self.assertFalse(ir.doIteration(None))
def test_reactorInterfaces(self):
"""
Verify that IOCP socket-representing classes implement L{IReadWriteHandle}.
"""
self.assertTrue(verifyClass(IReadWriteHandle, tcp.Connection))
self.assertTrue(verifyClass(IReadWriteHandle, udp.Port))
def test_fileHandleInterfaces(self):
"""
Verify that L{FileHandle} implements L{IPushProducer}.
"""
self.assertTrue(verifyClass(IPushProducer, FileHandle))
def test_maxEventsPerIteration(self):
"""
Verify that we don't lose an event when more than C{EVENTS_PER_LOOP}
events occur in the same reactor iteration.
"""
class FakeFD:
counter = 0
def logPrefix(self):
return 'FakeFD'
def cb(self, rc, bytes, evt):
self.counter += 1
ir = IOCPReactor()
fd = FakeFD()
event = _iocp.Event(fd.cb, fd)
for _ in range(EVENTS_PER_LOOP + 1):
ir.port.postEvent(0, KEY_NORMAL, event)
ir.doIteration(None)
self.assertEqual(fd.counter, EVENTS_PER_LOOP)
ir.doIteration(0)
self.assertEqual(fd.counter, EVENTS_PER_LOOP + 1)

View file

@ -0,0 +1,71 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.kqueuereactor}.
"""
from __future__ import division, absolute_import
import errno
from zope.interface import implementer
from twisted.trial.unittest import TestCase
try:
from twisted.internet.kqreactor import KQueueReactor, _IKQueue
kqueueSkip = None
except ImportError:
kqueueSkip = "KQueue not available."
def _fakeKEvent(*args, **kwargs):
"""
Do nothing.
"""
def makeFakeKQueue(testKQueue, testKEvent):
"""
Create a fake that implements L{_IKQueue}.
@param testKQueue: Something that acts like L{select.kqueue}.
@param testKEvent: Something that acts like L{select.kevent}.
@return: An implementation of L{_IKQueue} that includes C{testKQueue} and
C{testKEvent}.
"""
@implementer(_IKQueue)
class FakeKQueue(object):
kqueue = testKQueue
kevent = testKEvent
return FakeKQueue()
class KQueueTests(TestCase):
"""
These are tests for L{KQueueReactor}'s implementation, not its real world
behaviour. For that, look at
L{twisted.internet.test.reactormixins.ReactorBuilder}.
"""
skip = kqueueSkip
def test_EINTR(self):
"""
L{KQueueReactor} handles L{errno.EINTR} in C{doKEvent} by returning.
"""
class FakeKQueue(object):
"""
A fake KQueue that raises L{errno.EINTR} when C{control} is called,
like a real KQueue would if it was interrupted.
"""
def control(self, *args, **kwargs):
raise OSError(errno.EINTR, "Interrupted")
reactor = KQueueReactor(makeFakeKQueue(FakeKQueue, _fakeKEvent))
# This should return cleanly -- should not raise the OSError we're
# spawning, nor get upset and raise about the incomplete KQueue fake.
reactor.doKEvent(0)

View file

@ -0,0 +1,50 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.main}.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.internet.error import ReactorAlreadyInstalledError
from twisted.internet.main import installReactor
from twisted.internet.test.modulehelpers import NoReactor
class InstallReactorTests(unittest.SynchronousTestCase):
"""
Tests for L{installReactor}.
"""
def test_installReactor(self):
"""
L{installReactor} installs a new reactor if none is present.
"""
with NoReactor():
newReactor = object()
installReactor(newReactor)
from twisted.internet import reactor
self.assertIs(newReactor, reactor)
def test_alreadyInstalled(self):
"""
If a reactor is already installed, L{installReactor} raises
L{ReactorAlreadyInstalledError}.
"""
with NoReactor():
installReactor(object())
self.assertRaises(ReactorAlreadyInstalledError, installReactor,
object())
def test_errorIsAnAssertionError(self):
"""
For backwards compatibility, L{ReactorAlreadyInstalledError} is an
L{AssertionError}.
"""
self.assertTrue(issubclass(ReactorAlreadyInstalledError,
AssertionError))
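# Illustrative sketch (not part of the test module) of the installation
# contract tested above: installReactor must run before anything imports
# twisted.internet.reactor, and installing twice raises
# ReactorAlreadyInstalledError.  SelectReactor is just one concrete choice.
def _exampleInstallReactor():
    from twisted.internet.selectreactor import SelectReactor
    installReactor(SelectReactor())
    from twisted.internet import reactor  # now the instance installed above
    return reactor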

View file

@ -0,0 +1,200 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._newtls}.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.internet import interfaces
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.test.connectionmixins import (
ConnectableProtocol, runProtocolsWithReactor)
from twisted.internet.test.test_tls import SSLCreator, TLSMixin
from twisted.internet.test.test_tls import StartTLSClientCreator
from twisted.internet.test.test_tls import ContextGeneratingMixin
from twisted.internet.test.test_tcp import TCPCreator
try:
from twisted.protocols import tls
from twisted.internet import _newtls
except ImportError:
_newtls = None
from zope.interface import implementer
class BypassTLSTests(unittest.TestCase):
"""
Tests for the L{_newtls._BypassTLS} class.
"""
if not _newtls:
skip = "Couldn't import _newtls, perhaps pyOpenSSL is old or missing"
def test_loseConnectionPassThrough(self):
"""
C{_BypassTLS.loseConnection} calls C{loseConnection} on the base
class, while preserving any default argument in the base class'
C{loseConnection} implementation.
"""
default = object()
result = []
class FakeTransport(object):
def loseConnection(self, _connDone=default):
result.append(_connDone)
bypass = _newtls._BypassTLS(FakeTransport, FakeTransport())
# The default from FakeTransport is used:
bypass.loseConnection()
self.assertEqual(result, [default])
# And we can pass our own:
notDefault = object()
bypass.loseConnection(notDefault)
self.assertEqual(result, [default, notDefault])
class FakeProducer(object):
"""
A producer that does nothing.
"""
def pauseProducing(self):
pass
def resumeProducing(self):
pass
def stopProducing(self):
pass
@implementer(interfaces.IHandshakeListener)
class ProducerProtocol(ConnectableProtocol):
"""
Register a producer, unregister it, and verify the producer hooks up to
innards of C{TLSMemoryBIOProtocol}.
"""
def __init__(self, producer, result):
self.producer = producer
self.result = result
def handshakeCompleted(self):
if not isinstance(self.transport.protocol,
tls.TLSMemoryBIOProtocol):
# Either the test or the code has a bug...
raise RuntimeError("TLSMemoryBIOProtocol not hooked up.")
self.transport.registerProducer(self.producer, True)
# The producer was registered with the TLSMemoryBIOProtocol:
self.result.append(self.transport.protocol._producer._producer)
self.transport.unregisterProducer()
# The producer was unregistered from the TLSMemoryBIOProtocol:
self.result.append(self.transport.protocol._producer)
self.transport.loseConnection()
class ProducerTestsMixin(ReactorBuilder, TLSMixin, ContextGeneratingMixin):
"""
Test that the new TLS code integrates C{TLSMemoryBIOProtocol} correctly.
"""
if not _newtls:
skip = "Could not import twisted.internet._newtls"
def test_producerSSLFromStart(self):
"""
C{registerProducer} and C{unregisterProducer} on TLS transports
created as SSL from the get go are passed to the
C{TLSMemoryBIOProtocol}, not the underlying transport directly.
"""
result = []
producer = FakeProducer()
runProtocolsWithReactor(self, ConnectableProtocol(),
ProducerProtocol(producer, result),
SSLCreator())
self.assertEqual(result, [producer, None])
def test_producerAfterStartTLS(self):
"""
C{registerProducer} and C{unregisterProducer} on TLS transports
created by C{startTLS} are passed to the C{TLSMemoryBIOProtocol}, not
the underlying transport directly.
"""
result = []
producer = FakeProducer()
runProtocolsWithReactor(self, ConnectableProtocol(),
ProducerProtocol(producer, result),
StartTLSClientCreator())
self.assertEqual(result, [producer, None])
def startTLSAfterRegisterProducer(self, streaming):
"""
When a producer is registered, and then startTLS is called,
the producer is re-registered with the C{TLSMemoryBIOProtocol}.
"""
clientContext = self.getClientContext()
serverContext = self.getServerContext()
result = []
producer = FakeProducer()
class RegisterTLSProtocol(ConnectableProtocol):
def connectionMade(self):
self.transport.registerProducer(producer, streaming)
self.transport.startTLS(serverContext)
# Store TLSMemoryBIOProtocol and underlying transport producer
# status:
if streaming:
# _ProducerMembrane -> producer:
result.append(self.transport.protocol._producer._producer)
result.append(self.transport.producer._producer)
else:
# _ProducerMembrane -> _PullToPush -> producer:
result.append(
self.transport.protocol._producer._producer._producer)
result.append(self.transport.producer._producer._producer)
self.transport.unregisterProducer()
self.transport.loseConnection()
class StartTLSProtocol(ConnectableProtocol):
def connectionMade(self):
self.transport.startTLS(clientContext)
runProtocolsWithReactor(self, RegisterTLSProtocol(),
StartTLSProtocol(), TCPCreator())
self.assertEqual(result, [producer, producer])
def test_startTLSAfterRegisterProducerStreaming(self):
"""
When a streaming producer is registered, and then startTLS is called,
the producer is re-registered with the C{TLSMemoryBIOProtocol}.
"""
self.startTLSAfterRegisterProducer(True)
def test_startTLSAfterRegisterProducerNonStreaming(self):
"""
When a non-streaming producer is registered, and then startTLS is
called, the producer is re-registered with the
C{TLSMemoryBIOProtocol}.
"""
self.startTLSAfterRegisterProducer(False)
globals().update(ProducerTestsMixin.makeTestCaseClasses())

View file

@ -0,0 +1,46 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._pollingfile}.
"""
from twisted.python.runtime import platform
from twisted.trial.unittest import TestCase
if platform.isWindows():
from twisted.internet import _pollingfile
else:
_pollingfile = None
class PollableWritePipeTests(TestCase):
"""
Tests for L{_pollingfile._PollableWritePipe}.
"""
def test_writeUnicode(self):
"""
L{_pollingfile._PollableWritePipe.write} raises a C{TypeError} if an
attempt is made to append unicode data to the output buffer.
"""
p = _pollingfile._PollableWritePipe(1, lambda: None)
self.assertRaises(TypeError, p.write, u"test")
def test_writeSequenceUnicode(self):
"""
L{_pollingfile._PollableWritePipe.writeSequence} raises a C{TypeError}
if unicode data is part of the data sequence to be appended to the
output buffer.
"""
p = _pollingfile._PollableWritePipe(1, lambda: None)
self.assertRaises(TypeError, p.writeSequence, [u"test"])
self.assertRaises(TypeError, p.writeSequence, (u"test", ))
if _pollingfile is None:
PollableWritePipeTests.skip = "Test will run only on Windows."

View file

@ -0,0 +1,316 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
skipSockets = None
try:
from twisted.internet import unix
from twisted.test.test_unix import ClientProto
except ImportError:
skipSockets = "Platform does not support AF_UNIX sockets"
from twisted.internet.tcp import Port
from twisted.internet import reactor
class TrivialReactor(PosixReactorBase):
def __init__(self):
self._readers = {}
self._writers = {}
PosixReactorBase.__init__(self)
def addReader(self, reader):
self._readers[reader] = True
def removeReader(self, reader):
del self._readers[reader]
def addWriter(self, writer):
self._writers[writer] = True
def removeWriter(self, writer):
del self._writers[writer]
class PosixReactorBaseTests(TestCase):
"""
Tests for L{PosixReactorBase}.
"""
def _checkWaker(self, reactor):
self.assertIsInstance(reactor.waker, _Waker)
self.assertIn(reactor.waker, reactor._internalReaders)
self.assertIn(reactor.waker, reactor._readers)
def test_wakerIsInternalReader(self):
"""
When L{PosixReactorBase} is instantiated, it creates a waker and adds
it to its internal readers set.
"""
reactor = TrivialReactor()
self._checkWaker(reactor)
def test_removeAllSkipsInternalReaders(self):
"""
Any L{IReadDescriptors} in L{PosixReactorBase._internalReaders} are
left alone by L{PosixReactorBase._removeAll}.
"""
reactor = TrivialReactor()
extra = object()
reactor._internalReaders.add(extra)
reactor.addReader(extra)
reactor._removeAll(reactor._readers, reactor._writers)
self._checkWaker(reactor)
self.assertIn(extra, reactor._internalReaders)
self.assertIn(extra, reactor._readers)
def test_removeAllReturnsRemovedDescriptors(self):
"""
L{PosixReactorBase._removeAll} returns a list of removed
L{IReadDescriptor} and L{IWriteDescriptor} objects.
"""
reactor = TrivialReactor()
reader = object()
writer = object()
reactor.addReader(reader)
reactor.addWriter(writer)
removed = reactor._removeAll(
reactor._readers, reactor._writers)
self.assertEqual(set(removed), set([reader, writer]))
self.assertNotIn(reader, reactor._readers)
self.assertNotIn(writer, reactor._writers)
class TCPPortTests(TestCase):
"""
Tests for L{twisted.internet.tcp.Port}.
"""
if not isinstance(reactor, PosixReactorBase):
skip = "Non-posixbase reactor"
def test_connectionLostFailed(self):
"""
L{Port.stopListening} returns a L{Deferred} which errbacks if
L{Port.connectionLost} raises an exception.
"""
port = Port(12345, ServerFactory())
port.connected = True
port.connectionLost = lambda reason: 1 // 0
return self.assertFailure(port.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
"""
A reactor which is just barely runnable and which cannot monitor any
readers or writers, and which fires a L{Deferred} with the timeout
passed to its C{doIteration} method as soon as that method is invoked.
"""
def __init__(self):
PosixReactorBase.__init__(self)
self.iterationTimeout = Deferred()
self.now = 100
def addReader(self, reader):
"""
Ignore the reader. This is necessary because the waker will be
added. However, we won't actually monitor it for any events.
"""
def removeAll(self):
"""
There are no readers or writers, so there is nothing to remove.
This will be called when the reactor stops, though, so it must be
implemented.
"""
return []
def seconds(self):
"""
Override the real clock with a deterministic one that can be easily
controlled in a unit test.
"""
return self.now
def doIteration(self, timeout):
d = self.iterationTimeout
if d is not None:
self.iterationTimeout = None
d.callback(timeout)
class IterationTimeoutTests(TestCase):
"""
Tests for the timeout argument L{PosixReactorBase.run} calls
L{PosixReactorBase.doIteration} with in the presence of various delayed
calls.
"""
def _checkIterationTimeout(self, reactor):
timeout = []
reactor.iterationTimeout.addCallback(timeout.append)
reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
reactor.run()
return timeout[0]
def test_noCalls(self):
"""
If there are no delayed calls, C{doIteration} is called with a
timeout of L{None}.
"""
reactor = TimeoutReportReactor()
timeout = self._checkIterationTimeout(reactor)
self.assertIsNone(timeout)
def test_delayedCall(self):
"""
If there is a delayed call, C{doIteration} is called with a timeout
which is the difference between the current time and the time at
which that call is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 100)
def test_timePasses(self):
"""
If a delayed call is scheduled and then some time passes, the
timeout passed to C{doIteration} is reduced by the amount of time
which passed.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
reactor.now += 25
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 75)
def test_multipleDelayedCalls(self):
"""
If there are several delayed calls, C{doIteration} is called with a
timeout which is the difference between the current time and the
time at which the earlier of the two calls is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(50, lambda: None)
reactor.callLater(10, lambda: None)
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 10)
def test_resetDelayedCall(self):
"""
If a delayed call is reset, the timeout passed to C{doIteration} is
based on the interval between the time when reset is called and the
new delay of the call.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 25
call.reset(15)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 15)
def test_delayDelayedCall(self):
"""
If a delayed call is re-delayed, the timeout passed to
C{doIteration} is based on the remaining time before the call would
have been made and the additional amount of time passed to the delay
method.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 10
call.delay(20)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 60)
def test_cancelDelayedCall(self):
"""
If the only delayed call is canceled, L{None} is the timeout passed
to C{doIteration}.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
call.cancel()
timeout = self._checkIterationTimeout(reactor)
self.assertIsNone(timeout)
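# Illustrative sketch (not part of the test module) of the relationship the
# tests above verify: the timeout handed to doIteration is the time remaining
# until the earliest pending delayed call, measured on the fake clock.
def _exampleIterationTimeout():
    reactor = TimeoutReportReactor()
    reactor.callLater(50, lambda: None)
    reactor.callLater(10, lambda: None)
    reactor.now += 4
    timeouts = []
    reactor.iterationTimeout.addCallback(timeouts.append)
    reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
    reactor.run()
    return timeouts[0]  # 6, i.e. 10 - 4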
class ConnectedDatagramPortTests(TestCase):
"""
Test connected datagram UNIX sockets.
"""
if skipSockets is not None:
skip = skipSockets
def test_connectionFailedDoesntCallLoseConnection(self):
"""
L{ConnectedDatagramPort} does not call the deprecated C{loseConnection}
in L{ConnectedDatagramPort.connectionFailed}.
"""
def loseConnection():
"""
Dummy C{loseConnection} method. C{loseConnection} is deprecated and
should not get called.
"""
self.fail("loseConnection is deprecated and should not get called.")
port = unix.ConnectedDatagramPort(None, ClientProto())
port.loseConnection = loseConnection
port.connectionFailed("goodbye")
def test_connectionFailedCallsStopListening(self):
"""
L{ConnectedDatagramPort} calls L{ConnectedDatagramPort.stopListening}
instead of the deprecated C{loseConnection} in
L{ConnectedDatagramPort.connectionFailed}.
"""
self.called = False
def stopListening():
"""
Dummy C{stopListening} method.
"""
self.called = True
port = unix.ConnectedDatagramPort(None, ClientProto())
port.stopListening = stopListening
port.connectionFailed("goodbye")
self.assertTrue(self.called)

View file

@ -0,0 +1,352 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for POSIX-based L{IReactorProcess} implementations.
"""
from __future__ import division, absolute_import
import errno, os, sys
try:
import fcntl
except ImportError:
platformSkip = "non-POSIX platform"
else:
from twisted.internet import process
platformSkip = None
from twisted.python.compat import range
from twisted.trial.unittest import TestCase
class FakeFile(object):
"""
A dummy file object which records when it is closed.
"""
def __init__(self, testcase, fd):
self.testcase = testcase
self.fd = fd
def close(self):
self.testcase._files.remove(self.fd)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class FakeResourceModule(object):
"""
Fake version of L{resource} which hard-codes a particular rlimit for maximum
open files.
@ivar _limit: The value to return for the hard limit of number of open files.
"""
RLIMIT_NOFILE = 1
def __init__(self, limit):
self._limit = limit
def getrlimit(self, no):
"""
A fake of L{resource.getrlimit} which returns a pre-determined result.
"""
if no == self.RLIMIT_NOFILE:
return [0, self._limit]
return [123, 456]
class FDDetectorTests(TestCase):
"""
Tests for _FDDetector class in twisted.internet.process, which detects
which function to drop in place for the _listOpenFDs method.
@ivar devfs: A flag indicating whether the filesystem fake will indicate
that /dev/fd exists.
@ivar accurateDevFDResults: A flag indicating whether the /dev/fd fake
returns accurate open file information.
@ivar procfs: A flag indicating whether the filesystem fake will indicate
that /proc/<pid>/fd exists.
"""
skip = platformSkip
devfs = False
accurateDevFDResults = False
procfs = False
def getpid(self):
"""
Fake os.getpid; always returns the same value.
"""
return 123
def listdir(self, arg):
"""
Fake os.listdir; simulates different behaviour depending on which mode flags are set on the test case.
@param arg: the directory to list
"""
accurate = map(str, self._files)
if self.procfs and arg == ('/proc/%d/fd' % (self.getpid(),)):
return accurate
if self.devfs and arg == '/dev/fd':
if self.accurateDevFDResults:
return accurate
return ["0", "1", "2"]
raise OSError()
def openfile(self, fname, mode):
"""
This is a mock for L{open}. It keeps track of opened files so extra
descriptors can be returned from the mock for L{os.listdir} when used on
one of the list-of-filedescriptors directories.
A L{FakeFile} is returned which can be closed to remove the new
descriptor from the open list.
"""
# Find the smallest unused file descriptor and give it to the new file.
f = FakeFile(self, min(set(range(1024)) - set(self._files)))
self._files.append(f.fd)
return f
def hideResourceModule(self):
"""
Make the L{resource} module unimportable for the remainder of the
current test method.
"""
sys.modules['resource'] = None
def revealResourceModule(self, limit):
"""
Make a L{FakeResourceModule} instance importable at the L{resource}
name.
@param limit: The value which will be returned for the hard limit of
number of open files by the fake resource module's C{getrlimit}
function.
"""
sys.modules['resource'] = FakeResourceModule(limit)
def replaceResourceModule(self, value):
"""
Restore the original resource module to L{sys.modules}.
"""
if value is None:
try:
del sys.modules['resource']
except KeyError:
pass
else:
sys.modules['resource'] = value
def setUp(self):
"""
Set up the tests, giving ourselves a detector object to play with and
setting up its testable knobs to refer to our mocked versions.
"""
self.detector = process._FDDetector()
self.detector.listdir = self.listdir
self.detector.getpid = self.getpid
self.detector.openfile = self.openfile
self._files = [0, 1, 2]
self.addCleanup(
self.replaceResourceModule, sys.modules.get('resource'))
def test_selectFirstWorking(self):
"""
L{FDDetector._getImplementation} returns the first method from its
C{_implementations} list which returns results which reflect a newly
opened file descriptor.
"""
def failWithException():
raise ValueError("This does not work")
def failWithWrongResults():
return [0, 1, 2]
def correct():
return self._files[:]
self.detector._implementations = [
failWithException, failWithWrongResults, correct]
self.assertIs(correct, self.detector._getImplementation())
def test_selectLast(self):
"""
L{FDDetector._getImplementation} returns the last method from its
C{_implementations} list if none of the implementations manage to return
results which reflect a newly opened file descriptor.
"""
def failWithWrongResults():
return [3, 5, 9]
def failWithOtherWrongResults():
return [0, 1, 2]
self.detector._implementations = [
failWithWrongResults, failWithOtherWrongResults]
self.assertIs(
failWithOtherWrongResults, self.detector._getImplementation())
def test_identityOfListOpenFDsChanges(self):
"""
Check that the identity of _listOpenFDs changes after running
_listOpenFDs the first time, but not after the second time it's run.
In other words, check that the monkey patching actually works.
"""
# Create a new instance
detector = process._FDDetector()
first = detector._listOpenFDs.__name__
detector._listOpenFDs()
second = detector._listOpenFDs.__name__
detector._listOpenFDs()
third = detector._listOpenFDs.__name__
self.assertNotEqual(first, second)
self.assertEqual(second, third)
def test_devFDImplementation(self):
"""
L{_FDDetector._devFDImplementation} raises L{OSError} if there is no
I{/dev/fd} directory, otherwise it returns the basenames of its children
interpreted as integers.
"""
self.devfs = False
self.assertRaises(OSError, self.detector._devFDImplementation)
self.devfs = True
self.accurateDevFDResults = False
self.assertEqual([0, 1, 2], self.detector._devFDImplementation())
def test_procFDImplementation(self):
"""
L{_FDDetector._procFDImplementation} raises L{OSError} if there is no
I{/proc/<pid>/fd} directory, otherwise it returns the basenames of its
children interpreted as integers.
"""
self.procfs = False
self.assertRaises(OSError, self.detector._procFDImplementation)
self.procfs = True
self.assertEqual([0, 1, 2], self.detector._procFDImplementation())
def test_resourceFDImplementation(self):
"""
L{_FDDetector._fallbackFDImplementation} uses the L{resource} module if
it is available, returning a range of integers from 0 to the
minimum of C{1024} and the hard I{NOFILE} limit.
"""
# When the resource module is here, use its value.
self.revealResourceModule(512)
self.assertEqual(
list(range(512)), list(self.detector._fallbackFDImplementation()))
# But limit its value to the arbitrarily selected value 1024.
self.revealResourceModule(2048)
self.assertEqual(
list(range(1024)), list(self.detector._fallbackFDImplementation()))
def test_fallbackFDImplementation(self):
"""
L{_FDDetector._fallbackFDImplementation}, the implementation of last
resort, succeeds with a fixed range of integers from 0 to 1024 when the
L{resource} module is not importable.
"""
self.hideResourceModule()
self.assertEqual(list(range(1024)),
list(self.detector._fallbackFDImplementation()))
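# A minimal, hypothetical sketch of the fallback strategy exercised by the two
# tests above (the name _guessOpenFDRange is illustrative, not Twisted's real
# implementation): consult the resource module's hard NOFILE limit when it is
# importable, cap the result at 1024, and use a fixed range otherwise.
def _guessOpenFDRange():
    try:
        import resource
    except ImportError:
        return range(1024)
    hardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if hardLimit == resource.RLIM_INFINITY:
        hardLimit = 1024
    return range(min(1024, hardLimit))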
class FileDescriptorTests(TestCase):
"""
Tests for L{twisted.internet.process._listOpenFDs}
"""
skip = platformSkip
def test_openFDs(self):
"""
File descriptors returned by L{_listOpenFDs} are mostly open.
        This test assumes that zero-length writes fail with EBADF on closed
file descriptors.
"""
for fd in process._listOpenFDs():
try:
fcntl.fcntl(fd, fcntl.F_GETFL)
except IOError as err:
self.assertEqual(
errno.EBADF, err.errno,
"fcntl(%d, F_GETFL) failed with unexpected errno %d" % (
fd, err.errno))
def test_expectedFDs(self):
"""
L{_listOpenFDs} lists expected file descriptors.
"""
# This is a tricky test. A priori, there is no way to know what file
# descriptors are open now, so there is no way to know what _listOpenFDs
# should return. Work around this by creating some new file descriptors
# which we can know the state of and then just making assertions about
# their presence or absence in the result.
# Expect a file we just opened to be listed.
f = open(os.devnull)
openfds = process._listOpenFDs()
self.assertIn(f.fileno(), openfds)
# Expect a file we just closed not to be listed - with a caveat. The
# implementation may need to open a file to discover the result. That
# open file descriptor will be allocated the same number as the one we
# just closed. So, instead, create a hole in the file descriptor space
# to catch that internal descriptor and make the assertion about a
# different closed file descriptor.
# This gets allocated a file descriptor larger than f's, since nothing
# has been closed since we opened f.
fd = os.dup(f.fileno())
# But sanity check that; if it fails the test is invalid.
self.assertTrue(
fd > f.fileno(),
"Expected duplicate file descriptor to be greater than original")
try:
# Get rid of the original, creating the hole. The copy should still
# be open, of course.
f.close()
self.assertIn(fd, process._listOpenFDs())
finally:
# Get rid of the copy now
os.close(fd)
# And it should not appear in the result.
self.assertNotIn(fd, process._listOpenFDs())
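# A small helper sketch (not part of Twisted) showing the probe technique used
# by test_openFDs above: fcntl(fd, F_GETFL) succeeds for open descriptors and
# fails with EBADF for closed ones, without transferring any data.
import errno
import fcntl
def isOpenFD(fd):
    try:
        fcntl.fcntl(fd, fcntl.F_GETFL)
        return True
    except (IOError, OSError) as e:
        if e.errno == errno.EBADF:
            return False
        raise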

View file

@ -0,0 +1,908 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorProcess}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import division, absolute_import, print_function
import io
import os
import signal
import sys
import threading
import twisted
import subprocess
from twisted.trial.unittest import TestCase
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.log import msg, err
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath, _asFilesystemBytes
from twisted.python.compat import (networkString, range, items,
bytesEnviron, unicode)
from twisted.internet import utils
from twisted.internet.interfaces import IReactorProcess, IProcessTransport
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessDone, ProcessTerminated
# Get the current Python executable as a bytestring.
pyExe = FilePath(sys.executable)._asBytesPath()
twistedRoot = FilePath(twisted.__file__).parent().parent()
_uidgidSkip = None
if platform.isWindows():
resource = None
process = None
_uidgidSkip = "Cannot change UID/GID on Windows"
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
import resource
from twisted.internet import process
if os.getuid() != 0:
_uidgidSkip = "Cannot change UID/GID except as root"
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
def onlyOnPOSIX(testMethod):
"""
Only run this test on POSIX platforms.
@param testMethod: A test function, being decorated.
@return: the C{testMethod} argument.
"""
if resource is None:
testMethod.skip = "Test only applies to POSIX platforms."
return testMethod
class _ShutdownCallbackProcessProtocol(ProcessProtocol):
"""
An L{IProcessProtocol} which fires a Deferred when the process it is
associated with ends.
@ivar received: A C{dict} mapping file descriptors to lists of bytes
received from the child process on those file descriptors.
"""
def __init__(self, whenFinished):
self.whenFinished = whenFinished
self.received = {}
def childDataReceived(self, fd, bytes):
self.received.setdefault(fd, []).append(bytes)
def processEnded(self, reason):
self.whenFinished.callback(None)
class ProcessTestsBuilderBase(ReactorBuilder):
"""
Base class for L{IReactorProcess} tests which defines some tests which
can be applied to PTY or non-PTY uses of C{spawnProcess}.
Subclasses are expected to set the C{usePTY} attribute to C{True} or
C{False}.
"""
requiredInterfaces = [IReactorProcess]
def test_processTransportInterface(self):
"""
L{IReactorProcess.spawnProcess} connects the protocol passed to it
to a transport which provides L{IProcessTransport}.
"""
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
reactor = self.buildReactor()
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", b""],
usePTY=self.usePTY)
# The transport is available synchronously, so we can check it right
# away (unlike many transport-based tests). This is convenient even
# though it's probably not how the spawnProcess interface should really
# work.
# We're not using verifyObject here because part of
# IProcessTransport is a lie - there are no getHost or getPeer
# methods. See #1124.
self.assertTrue(IProcessTransport.providedBy(transport))
# Let the process run and exit so we don't leave a zombie around.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
def _writeTest(self, write):
"""
Helper for testing L{IProcessTransport} write functionality. This
method spawns a child process and gives C{write} a chance to write some
bytes to it. It then verifies that the bytes were actually written to
it (by relying on the child process to echo them back).
@param write: A two-argument callable. This is invoked with a process
transport and some bytes to write to it.
"""
reactor = self.buildReactor()
ended = Deferred()
protocol = _ShutdownCallbackProcessProtocol(ended)
bytesToSend = b"hello, world" + networkString(os.linesep)
program = (
b"import sys\n"
b"sys.stdout.write(sys.stdin.readline())\n"
)
def startup():
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", program])
try:
write(transport, bytesToSend)
except:
err(None, "Unhandled exception while writing")
transport.signalProcess('KILL')
reactor.callWhenRunning(startup)
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
self.assertEqual(bytesToSend, b"".join(protocol.received[1]))
def test_write(self):
"""
L{IProcessTransport.write} writes the specified C{bytes} to the standard
input of the child process.
"""
def write(transport, bytesToSend):
transport.write(bytesToSend)
self._writeTest(write)
def test_writeSequence(self):
"""
L{IProcessTransport.writeSequence} writes the specified C{list} of
C{bytes} to the standard input of the child process.
"""
def write(transport, bytesToSend):
transport.writeSequence([bytesToSend])
self._writeTest(write)
def test_writeToChild(self):
"""
L{IProcessTransport.writeToChild} writes the specified C{bytes} to the
specified file descriptor of the child process.
"""
def write(transport, bytesToSend):
transport.writeToChild(0, bytesToSend)
self._writeTest(write)
def test_writeToChildBadFileDescriptor(self):
"""
L{IProcessTransport.writeToChild} raises L{KeyError} if passed a file
        descriptor which was not set up by L{IReactorProcess.spawnProcess}.
"""
def write(transport, bytesToSend):
try:
self.assertRaises(KeyError, transport.writeToChild, 13, bytesToSend)
finally:
# Just get the process to exit so the test can complete
transport.write(bytesToSend)
self._writeTest(write)
def test_spawnProcessEarlyIsReaped(self):
"""
If, before the reactor is started with L{IReactorCore.run}, a
process is started with L{IReactorProcess.spawnProcess} and
terminates, the process is reaped once the reactor is started.
"""
reactor = self.buildReactor()
# Create the process with no shared file descriptors, so that there
# are no other events for the reactor to notice and "cheat" with.
# We want to be sure it's really dealing with the process exiting,
# not some associated event.
if self.usePTY:
childFDs = None
else:
childFDs = {}
# Arrange to notice the SIGCHLD.
signaled = threading.Event()
def handler(*args):
signaled.set()
signal.signal(signal.SIGCHLD, handler)
# Start a process - before starting the reactor!
ended = Deferred()
reactor.spawnProcess(
_ShutdownCallbackProcessProtocol(ended), pyExe,
[pyExe, b"-c", b""], usePTY=self.usePTY, childFDs=childFDs)
# Wait for the SIGCHLD (which might have been delivered before we got
# here, but that's okay because the signal handler was installed above,
# before we could have gotten it).
signaled.wait(120)
if not signaled.isSet():
self.fail("Timed out waiting for child process to exit.")
# Capture the processEnded callback.
result = []
ended.addCallback(result.append)
if result:
# The synchronous path through spawnProcess / Process.__init__ /
# registerReapProcessHandler was encountered. There's no reason to
# start the reactor, because everything is done already.
return
# Otherwise, though, start the reactor so it can tell us the process
# exited.
ended.addCallback(lambda ignored: reactor.stop())
self.runReactor(reactor)
# Make sure the reactor stopped because the Deferred fired.
self.assertTrue(result)
if getattr(signal, 'SIGCHLD', None) is None:
test_spawnProcessEarlyIsReaped.skip = (
"Platform lacks SIGCHLD, early-spawnProcess test can't work.")
def test_processExitedWithSignal(self):
"""
The C{reason} argument passed to L{IProcessProtocol.processExited} is a
L{ProcessTerminated} instance if the child process exits with a signal.
"""
sigName = 'TERM'
sigNum = getattr(signal, 'SIG' + sigName)
exited = Deferred()
source = (
b"import sys\n"
# Talk so the parent process knows the process is running. This is
# necessary because ProcessProtocol.makeConnection may be called
# before this process is exec'd. It would be unfortunate if we
# SIGTERM'd the Twisted process while it was on its way to doing
# the exec.
b"sys.stdout.write('x')\n"
b"sys.stdout.flush()\n"
b"sys.stdin.read()\n")
class Exiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.signalProcess(sigName)
def childConnectionLost(self, fd):
msg('childConnectionLost(%d)' % (fd,))
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# Protect the Deferred from the failure so that it follows
# the callback chain. This doesn't use the errback chain
# because it wants to make sure reason is a Failure. An
# Exception would also make an errback-based test pass, and
# that would be wrong.
exited.callback([reason])
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Exiter(), pyExe,
[pyExe, b"-c", source], usePTY=self.usePTY)
def cbExited(args):
failure, = args
# Trapping implicitly verifies that it's a Failure (rather than
# an exception) and explicitly makes sure it's the right type.
failure.trap(ProcessTerminated)
err = failure.value
if platform.isWindows():
# Windows can't really /have/ signals, so it certainly can't
# report them as the reason for termination. Maybe there's
# something better we could be doing here, anyway? Hard to
# say. Anyway, this inconsistency between different platforms
# is extremely unfortunate and I would remove it if I
# could. -exarkun
self.assertIsNone(err.signal)
self.assertEqual(err.exitCode, 1)
else:
self.assertEqual(err.signal, sigNum)
self.assertIsNone(err.exitCode)
exited.addCallback(cbExited)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def test_systemCallUninterruptedByChildExit(self):
"""
If a child process exits while a system call is in progress, the system
call should not be interfered with. In particular, it should not fail
with EINTR.
Older versions of Twisted installed a SIGCHLD handler on POSIX without
using the feature exposed by the SA_RESTART flag to sigaction(2). The
most noticeable problem this caused was for blocking reads and writes to
sometimes fail with EINTR.
"""
reactor = self.buildReactor()
result = []
def f():
try:
exe = pyExe.decode(sys.getfilesystemencoding())
subprocess.Popen([exe, "-c", "import time; time.sleep(0.1)"])
f2 = subprocess.Popen([exe, "-c",
("import time; time.sleep(0.5);"
"print(\'Foo\')")],
stdout=subprocess.PIPE)
# The read call below will blow up with an EINTR from the
# SIGCHLD from the first process exiting if we install a
# SIGCHLD handler without SA_RESTART. (which we used to do)
with f2.stdout:
result.append(f2.stdout.read())
finally:
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor)
self.assertEqual(result, [b"Foo" + os.linesep.encode('ascii')])
@onlyOnPOSIX
def test_openFileDescriptors(self):
"""
        Processes spawned with spawnProcess() do not inherit extraneous file
        descriptors that are open in the parent; they do have stdin, stdout,
        and stderr open.
"""
# To test this, we are going to open a file descriptor in the parent
# that is unlikely to be opened in the child, then verify that it's not
# open in the child.
source = networkString("""
import sys
sys.path.insert(0, '{0}')
from twisted.internet import process
sys.stdout.write(repr(process._listOpenFDs()))
sys.stdout.flush()""".format(twistedRoot.path))
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
# The call to "os.listdir()" (in _listOpenFDs's implementation) opens a
# file descriptor (with "opendir"), which shows up in _listOpenFDs's
# result. And speaking of "random" file descriptors, the code required
# for _listOpenFDs itself imports logger, which imports random, which
# (depending on your Python version) might leave /dev/urandom open.
# More generally though, even if we were to use an extremely minimal C
# program, the operating system would be within its rights to open file
# descriptors we might not know about in the C library's
# initialization; things like debuggers, profilers, or nsswitch plugins
# might open some and this test should pass in those environments.
# Although some of these file descriptors aren't predictable, we should
# at least be able to select a very large file descriptor which is very
# unlikely to be opened automatically in the subprocess. (Apply a
# fudge factor to avoid hard-coding something too near a limit
# condition like the maximum possible file descriptor, which a library
# might at least hypothetically select.)
fudgeFactor = 17
unlikelyFD = (resource.getrlimit(resource.RLIMIT_NOFILE)[0]
- fudgeFactor)
os.dup2(w, unlikelyFD)
self.addCleanup(os.close, unlikelyFD)
output = io.BytesIO()
class GatheringProtocol(ProcessProtocol):
outReceived = output.write
def processEnded(self, reason):
reactor.stop()
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, GatheringProtocol(), pyExe,
[pyExe, b"-Wignore", b"-c", source], usePTY=self.usePTY)
self.runReactor(reactor)
reportedChildFDs = set(eval(output.getvalue()))
stdFDs = [0, 1, 2]
# Unfortunately this assertion is still not *entirely* deterministic,
# since hypothetically, any library could open any file descriptor at
# any time. See comment above.
self.assertEqual(
reportedChildFDs.intersection(set(stdFDs + [unlikelyFD])),
set(stdFDs)
)
@onlyOnPOSIX
def test_errorDuringExec(self):
"""
When L{os.execvpe} raises an exception, it will format that exception
on stderr as UTF-8, regardless of system encoding information.
"""
def execvpe(*args, **kw):
# Ensure that real traceback formatting has some non-ASCII in it,
# by forcing the filename of the last frame to contain non-ASCII.
filename = u"<\N{SNOWMAN}>"
if not isinstance(filename, str):
filename = filename.encode("utf-8")
codeobj = compile("1/0", filename, "single")
eval(codeobj)
self.patch(os, "execvpe", execvpe)
self.patch(sys, "getfilesystemencoding", lambda: "ascii")
reactor = self.buildReactor()
output = io.BytesIO()
@reactor.callWhenRunning
def whenRunning():
class TracebackCatcher(ProcessProtocol, object):
errReceived = output.write
def processEnded(self, reason):
reactor.stop()
reactor.spawnProcess(TracebackCatcher(), pyExe,
[pyExe, b"-c", b""])
self.runReactor(reactor, timeout=30)
self.assertIn(u"\N{SNOWMAN}".encode("utf-8"), output.getvalue())
def test_timelyProcessExited(self):
"""
If a spawned process exits, C{processExited} will be called in a
timely manner.
"""
reactor = self.buildReactor()
class ExitingProtocol(ProcessProtocol):
exited = False
def processExited(protoSelf, reason):
protoSelf.exited = True
reactor.stop()
self.assertEqual(reason.value.exitCode, 0)
protocol = ExitingProtocol()
reactor.callWhenRunning(
reactor.spawnProcess, protocol, pyExe,
[pyExe, b"-c", b"raise SystemExit(0)"],
usePTY=self.usePTY)
# This will timeout if processExited isn't called:
self.runReactor(reactor, timeout=30)
self.assertTrue(protocol.exited)
def _changeIDTest(self, which):
"""
Launch a child process, using either the C{uid} or C{gid} argument to
L{IReactorProcess.spawnProcess} to change either its UID or GID to a
different value. If the child process reports this hasn't happened,
raise an exception to fail the test.
@param which: Either C{b"uid"} or C{b"gid"}.
"""
program = [
"import os",
"raise SystemExit(os.get%s() != 1)" % (which,)]
container = []
class CaptureExitStatus(ProcessProtocol):
def processEnded(self, reason):
container.append(reason)
reactor.stop()
reactor = self.buildReactor()
protocol = CaptureExitStatus()
reactor.callWhenRunning(
reactor.spawnProcess, protocol, pyExe,
[pyExe, "-c", "\n".join(program)],
**{which: 1})
self.runReactor(reactor)
self.assertEqual(0, container[0].value.exitCode)
def test_changeUID(self):
"""
If a value is passed for L{IReactorProcess.spawnProcess}'s C{uid}, the
child process is run with that UID.
"""
self._changeIDTest("uid")
if _uidgidSkip is not None:
test_changeUID.skip = _uidgidSkip
def test_changeGID(self):
"""
If a value is passed for L{IReactorProcess.spawnProcess}'s C{gid}, the
child process is run with that GID.
"""
self._changeIDTest("gid")
if _uidgidSkip is not None:
test_changeGID.skip = _uidgidSkip
def test_processExitedRaises(self):
"""
If L{IProcessProtocol.processExited} raises an exception, it is logged.
"""
# Ideally we wouldn't need to poke the process module; see
# https://twistedmatrix.com/trac/ticket/6889
reactor = self.buildReactor()
class TestException(Exception):
pass
class Protocol(ProcessProtocol):
def processExited(self, reason):
reactor.stop()
raise TestException("processedExited raised")
protocol = Protocol()
transport = reactor.spawnProcess(
protocol, pyExe, [pyExe, b"-c", b""],
usePTY=self.usePTY)
self.runReactor(reactor)
# Manually clean-up broken process handler.
# Only required if the test fails on systems that support
# the process module.
if process is not None:
for pid, handler in items(process.reapProcessHandlers):
if handler is not transport:
continue
process.unregisterReapProcessHandler(pid, handler)
self.fail("After processExited raised, transport was left in"
" reapProcessHandlers")
self.assertEqual(1, len(self.flushLoggedErrors(TestException)))
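# A hedged sketch of the signal-handling detail behind
# test_systemCallUninterruptedByChildExit above (POSIX only; the helper name
# is made up): installing a handler with signal.signal() alone can leave
# blocking system calls interruptible (failing with EINTR), while
# signal.siginterrupt(sig, False) requests SA_RESTART semantics so that
# interrupted calls are restarted instead.
def installRestartingSIGCHLDHandler(handler):
    signal.signal(signal.SIGCHLD, handler)
    # Ask the OS to restart blocking system calls interrupted by SIGCHLD.
    signal.siginterrupt(signal.SIGCHLD, False)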
class ProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which do not have a PTY.
"""
usePTY = False
keepStdioOpenProgram = b'twisted.internet.test.process_helper'
if platform.isWindows():
keepStdioOpenArg = b"windows"
else:
# Just a value that doesn't equal "windows"
keepStdioOpenArg = b""
# Define this test here because PTY-using processes only have stdin and
# stdout and the test would need to be different for that to work.
def test_childConnectionLost(self):
"""
L{IProcessProtocol.childConnectionLost} is called each time a file
descriptor associated with a child process is closed.
"""
connected = Deferred()
lost = {0: Deferred(), 1: Deferred(), 2: Deferred()}
class Closer(ProcessProtocol):
def makeConnection(self, transport):
connected.callback(transport)
def childConnectionLost(self, childFD):
lost[childFD].callback(None)
target = b"twisted.internet.test.process_loseconnection"
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Closer(), pyExe,
[pyExe, b"-m", target], env=properEnv, usePTY=self.usePTY)
def cbConnected(transport):
transport.write(b'2\n')
return lost[2].addCallback(lambda ign: transport)
connected.addCallback(cbConnected)
def lostSecond(transport):
transport.write(b'1\n')
return lost[1].addCallback(lambda ign: transport)
connected.addCallback(lostSecond)
def lostFirst(transport):
transport.write(b'\n')
connected.addCallback(lostFirst)
connected.addErrback(err)
def cbEnded(ignored):
reactor.stop()
connected.addCallback(cbEnded)
self.runReactor(reactor)
# This test is here because PTYProcess never delivers childConnectionLost.
def test_processEnded(self):
"""
L{IProcessProtocol.processEnded} is called after the child process
exits and L{IProcessProtocol.childConnectionLost} is called for each of
its file descriptors.
"""
ended = Deferred()
lost = []
class Ender(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
self.transport.loseConnection()
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
def processEnded(self, reason):
msg('processEnded(%r)' % (reason,))
ended.callback([reason])
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Ender(), pyExe,
[pyExe, b"-m", self.keepStdioOpenProgram, b"child",
self.keepStdioOpenArg],
env=properEnv, usePTY=self.usePTY)
def cbEnded(args):
failure, = args
failure.trap(ProcessDone)
self.assertEqual(set(lost), set([0, 1, 2]))
ended.addCallback(cbEnded)
ended.addErrback(err)
ended.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
# This test is here because PTYProcess.loseConnection does not actually
# close the file descriptors to the child process. This test needs to be
# written fairly differently for PTYProcess.
def test_processExited(self):
"""
L{IProcessProtocol.processExited} is called when the child process
exits, even if file descriptors associated with the child are still
open.
"""
exited = Deferred()
allLost = Deferred()
lost = []
class Waiter(ProcessProtocol):
def childDataReceived(self, fd, data):
msg('childDataReceived(%d, %r)' % (fd, data))
def childConnectionLost(self, childFD):
msg('childConnectionLost(%d)' % (childFD,))
lost.append(childFD)
if len(lost) == 3:
allLost.callback(None)
def processExited(self, reason):
msg('processExited(%r)' % (reason,))
# See test_processExitedWithSignal
exited.callback([reason])
self.transport.loseConnection()
reactor = self.buildReactor()
reactor.callWhenRunning(
reactor.spawnProcess, Waiter(), pyExe,
[pyExe, b"-u", b"-m", self.keepStdioOpenProgram, b"child",
self.keepStdioOpenArg],
env=properEnv, usePTY=self.usePTY)
def cbExited(args):
failure, = args
failure.trap(ProcessDone)
msg('cbExited; lost = %s' % (lost,))
self.assertEqual(lost, [])
return allLost
exited.addCallback(cbExited)
def cbAllLost(ignored):
self.assertEqual(set(lost), set([0, 1, 2]))
exited.addCallback(cbAllLost)
exited.addErrback(err)
exited.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
def makeSourceFile(self, sourceLines):
"""
Write the given list of lines to a text file and return the absolute
path to it.
"""
script = _asFilesystemBytes(self.mktemp())
with open(script, 'wt') as scriptFile:
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
return os.path.abspath(script)
def test_shebang(self):
"""
Spawning a process with an executable which is a script starting
with an interpreter definition line (#!) uses that interpreter to
evaluate the script.
"""
shebangOutput = b'this is the shebang output'
scriptFile = self.makeSourceFile([
"#!%s" % (pyExe.decode('ascii'),),
"import sys",
"sys.stdout.write('%s')" % (shebangOutput.decode('ascii'),),
"sys.stdout.flush()"])
os.chmod(scriptFile, 0o700)
reactor = self.buildReactor()
def cbProcessExited(args):
out, err, code = args
msg("cbProcessExited((%r, %r, %d))" % (out, err, code))
self.assertEqual(out, shebangOutput)
self.assertEqual(err, b"")
self.assertEqual(code, 0)
def shutdown(passthrough):
reactor.stop()
return passthrough
def start():
d = utils.getProcessOutputAndValue(scriptFile, reactor=reactor)
d.addBoth(shutdown)
d.addCallback(cbProcessExited)
d.addErrback(err)
reactor.callWhenRunning(start)
self.runReactor(reactor)
def test_processCommandLineArguments(self):
"""
Arguments given to spawnProcess are passed to the child process as
originally intended.
"""
us = b"twisted.internet.test.process_cli"
args = [b'hello', b'"', b' \t|<>^&', br'"\\"hello\\"', br'"foo\ bar baz\""']
# Ensure that all non-NUL characters can be passed too.
allChars = "".join(map(chr, range(1, 255)))
if isinstance(allChars, unicode):
allChars.encode("utf-8")
reactor = self.buildReactor()
def processFinished(finishedArgs):
output, err, code = finishedArgs
output = output.split(b'\0')
# Drop the trailing \0.
output.pop()
self.assertEqual(args, output)
def shutdown(result):
reactor.stop()
return result
def spawnChild():
d = succeed(None)
d.addCallback(lambda dummy: utils.getProcessOutputAndValue(
pyExe, [b"-m", us] + args, env=properEnv,
reactor=reactor))
d.addCallback(processFinished)
d.addBoth(shutdown)
reactor.callWhenRunning(spawnChild)
self.runReactor(reactor)
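# makeTestCaseClasses() (inherited from ReactorBuilder) is expected to return
# a mapping of generated class names to TestCase subclasses, one per reactor
# the builder supports; pushing that mapping into globals() below is what lets
# trial discover a separate copy of these tests for every reactor
# implementation.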
globals().update(ProcessTestsBuilder.makeTestCaseClasses())
class PTYProcessTestsBuilder(ProcessTestsBuilderBase):
"""
Builder defining tests relating to L{IReactorProcess} for child processes
which have a PTY.
"""
usePTY = True
if platform.isWindows():
skip = "PTYs are not supported on Windows."
elif platform.isMacOSX():
skip = "PTYs are flaky from a Darwin bug. See #8840."
skippedReactors = {
"twisted.internet.pollreactor.PollReactor":
"macOS's poll() does not support PTYs"}
globals().update(PTYProcessTestsBuilder.makeTestCaseClasses())
class PotentialZombieWarningTests(TestCase):
"""
Tests for L{twisted.internet.error.PotentialZombieWarning}.
"""
def test_deprecated(self):
"""
Accessing L{PotentialZombieWarning} via the
I{PotentialZombieWarning} attribute of L{twisted.internet.error}
results in a deprecation warning being emitted.
"""
from twisted.internet import error
error.PotentialZombieWarning
warnings = self.flushWarnings([self.test_deprecated])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"twisted.internet.error.PotentialZombieWarning was deprecated in "
"Twisted 10.0.0: There is no longer any potential for zombie "
"process.")
self.assertEqual(len(warnings), 1)
class ProcessIsUnimportableOnUnsupportedPlatormsTests(TestCase):
"""
Tests to ensure that L{twisted.internet.process} is unimportable on
platforms where it does not work (namely Windows).
"""
def test_unimportableOnWindows(self):
"""
L{twisted.internet.process} is unimportable on Windows.
"""
with self.assertRaises(ImportError):
import twisted.internet.process
twisted.internet.process # shh pyflakes
if not platform.isWindows():
test_unimportableOnWindows.skip = "Only relevant on Windows."

View file

@ -0,0 +1,520 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.protocol}.
"""
from __future__ import division, absolute_import
from io import BytesIO
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.internet.defer import CancelledError
from twisted.internet.interfaces import (
IProtocol, ILoggingContext, IProtocolFactory, IConsumer)
from twisted.internet.protocol import (
Protocol, ClientCreator, Factory, ProtocolToConsumerAdapter,
ConsumerToProtocolAdapter, FileWrapper)
from twisted.logger import LogLevel, globalLogPublisher
from twisted.python.compat import _PY3
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted.trial.unittest import TestCase
class ClientCreatorTests(TestCase):
"""
Tests for L{twisted.internet.protocol.ClientCreator}.
"""
def _basicConnectTest(self, check):
"""
Helper for implementing a test to verify that one of the I{connect}
methods of L{ClientCreator} passes the right arguments to the right
reactor method.
@param check: A function which will be invoked with a reactor and a
L{ClientCreator} instance and which should call one of the
L{ClientCreator}'s I{connect} methods and assert that all of its
arguments except for the factory are passed on as expected to the
reactor. The factory should be returned.
"""
class SomeProtocol(Protocol):
pass
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, SomeProtocol)
factory = check(reactor, cc)
protocol = factory.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
def test_connectTCP(self):
"""
L{ClientCreator.connectTCP} calls C{reactor.connectTCP} with the host
and port information passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectTCP('example.com', 1234, 4321, ('1.2.3.4', 9876))
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('1.2.3.4', 9876))
return factory
self._basicConnectTest(check)
def test_connectUNIX(self):
"""
L{ClientCreator.connectUNIX} calls C{reactor.connectUNIX} with the
filename passed to it, and with a factory which will construct the
protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectUNIX('/foo/bar', 123, True)
address, factory, timeout, checkPID = reactor.unixClients.pop()
self.assertEqual(address, '/foo/bar')
self.assertEqual(timeout, 123)
self.assertTrue(checkPID)
return factory
self._basicConnectTest(check)
def test_connectSSL(self):
"""
L{ClientCreator.connectSSL} calls C{reactor.connectSSL} with the host,
port, and context factory passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
expectedContextFactory = object()
cc.connectSSL('example.com', 1234, expectedContextFactory, 4321, ('4.3.2.1', 5678))
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertIs(contextFactory, expectedContextFactory)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('4.3.2.1', 5678))
return factory
self._basicConnectTest(check)
def _cancelConnectTest(self, connect):
"""
Helper for implementing a test to verify that cancellation of the
L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
implemented to cancel the underlying connector.
@param connect: A function which will be invoked with a L{ClientCreator}
instance as an argument and which should call one its I{connect}
methods and return the result.
@return: A L{Deferred} which fires when the test is complete or fails if
there is a problem.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(cc)
connector = reactor.connectors.pop()
self.assertFalse(connector._disconnected)
d.cancel()
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCP(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectTCP('example.com', 1234)
return self._cancelConnectTest(connect)
def test_cancelConnectUNIX(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectUNIX} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectUNIX('/foo/bar')
return self._cancelConnectTest(connect)
def test_cancelConnectSSL(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectSSL} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectSSL('example.com', 1234, object())
return self._cancelConnectTest(connect)
def _cancelConnectTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection is set up but before it is fired with the
resulting protocol instance.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(reactor, cc)
connector = reactor.connectors.pop()
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, disconnecting the transport just set up and
# cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
# A real connector implementation is responsible for disconnecting the
# transport as well. For our purposes, just check that someone told the
# connector to disconnect.
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPTimeout(self):
"""
L{ClientCreator.connectTCP} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectUNIXTimeout(self):
"""
L{ClientCreator.connectUNIX} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
            address, factory, timeout, checkPID = reactor.unixClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectSSLTimeout(self):
"""
L{ClientCreator.connectSSL} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def _cancelConnectFailedTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection attempt has failed but before it is fired
with the resulting failure.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d, factory = connect(reactor, cc)
connector = reactor.connectors.pop()
factory.clientConnectionFailed(
connector, Failure(Exception("Simulated failure")))
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPFailedTimeout(self):
"""
Similar to L{test_cancelConnectTCPTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectUNIXFailedTimeout(self):
"""
Similar to L{test_cancelConnectUNIXTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectSSLFailedTimeout(self):
"""
Similar to L{test_cancelConnectSSLTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
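# A minimal sketch (names are placeholders) of the cancellation pattern the
# _cancelConnect* tests above exercise: a Deferred constructed with a
# canceller can abort the underlying operation when d.cancel() is called,
# after which the Deferred fails with CancelledError.
from twisted.internet.defer import Deferred
def connectionDeferred(connector):
    def cancel(d):
        # Abort the in-progress connection attempt; the Deferred will then
        # errback with CancelledError unless the canceller fired it itself.
        connector.disconnect()
    return Deferred(cancel)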
class ProtocolTests(TestCase):
"""
Tests for L{twisted.internet.protocol.Protocol}.
"""
def test_interfaces(self):
"""
L{Protocol} instances provide L{IProtocol} and L{ILoggingContext}.
"""
proto = Protocol()
self.assertTrue(verifyObject(IProtocol, proto))
self.assertTrue(verifyObject(ILoggingContext, proto))
def test_logPrefix(self):
"""
L{Protocol.logPrefix} returns the protocol class's name.
"""
class SomeThing(Protocol):
pass
self.assertEqual("SomeThing", SomeThing().logPrefix())
def test_makeConnection(self):
"""
L{Protocol.makeConnection} sets the given transport on itself, and
then calls C{connectionMade}.
"""
result = []
class SomeProtocol(Protocol):
def connectionMade(self):
result.append(self.transport)
transport = object()
protocol = SomeProtocol()
protocol.makeConnection(transport)
self.assertEqual(result, [transport])
class FactoryTests(TestCase):
"""
Tests for L{protocol.Factory}.
"""
def test_interfaces(self):
"""
L{Factory} instances provide both L{IProtocolFactory} and
L{ILoggingContext}.
"""
factory = Factory()
self.assertTrue(verifyObject(IProtocolFactory, factory))
self.assertTrue(verifyObject(ILoggingContext, factory))
def test_logPrefix(self):
"""
L{Factory.logPrefix} returns the name of the factory class.
"""
class SomeKindOfFactory(Factory):
pass
self.assertEqual("SomeKindOfFactory", SomeKindOfFactory().logPrefix())
def test_defaultBuildProtocol(self):
"""
L{Factory.buildProtocol} by default constructs a protocol by calling
its C{protocol} attribute, and attaches the factory to the result.
"""
class SomeProtocol(Protocol):
pass
f = Factory()
f.protocol = SomeProtocol
protocol = f.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
self.assertIs(protocol.factory, f)
def test_forProtocol(self):
"""
L{Factory.forProtocol} constructs a Factory, passing along any
additional arguments, and sets its C{protocol} attribute to the given
Protocol subclass.
"""
class ArgTakingFactory(Factory):
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
factory = ArgTakingFactory.forProtocol(Protocol, 1, 2, foo=12)
self.assertEqual(factory.protocol, Protocol)
self.assertEqual(factory.args, (1, 2))
self.assertEqual(factory.kwargs, {"foo": 12})
def test_doStartLoggingStatement(self):
"""
L{Factory.doStart} logs that it is starting a factory, followed by
the L{repr} of the L{Factory} instance that is being started.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
f = Factory()
f.doStart()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Starting factory {factory!r}')
def test_doStopLoggingStatement(self):
"""
L{Factory.doStop} logs that it is stopping a factory, followed by
the L{repr} of the L{Factory} instance that is being stopped.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
class MyFactory(Factory):
numPorts = 1
f = MyFactory()
f.doStop()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Stopping factory {factory!r}')
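# A brief usage sketch of the Factory.forProtocol pattern verified above
# (EchoProtocol is a made-up name): it builds a Factory without defining a
# subclass just to set the protocol attribute.
class EchoProtocol(Protocol):
    def dataReceived(self, data):
        self.transport.write(data)
echoFactory = Factory.forProtocol(EchoProtocol)
# Roughly equivalent to:
#     echoFactory = Factory()
#     echoFactory.protocol = EchoProtocol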
class AdapterTests(TestCase):
"""
Tests for L{ProtocolToConsumerAdapter} and L{ConsumerToProtocolAdapter}.
"""
def test_protocolToConsumer(self):
"""
L{IProtocol} providers can be adapted to L{IConsumer} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
p = Protocol()
p.dataReceived = result.append
consumer = IConsumer(p)
consumer.write(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(consumer, ProtocolToConsumerAdapter)
def test_consumerToProtocol(self):
"""
L{IConsumer} providers can be adapted to L{IProtocol} providers using
        L{ConsumerToProtocolAdapter}.
"""
result = []
@implementer(IConsumer)
class Consumer(object):
def write(self, d):
result.append(d)
c = Consumer()
protocol = IProtocol(c)
protocol.dataReceived(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(protocol, ConsumerToProtocolAdapter)
class FileWrapperTests(TestCase):
"""
L{twisted.internet.protocol.FileWrapper}
"""
def test_write(self):
"""
L{twisted.internet.protocol.FileWrapper.write}
"""
wrapper = FileWrapper(BytesIO())
wrapper.write(b"test1")
self.assertEqual(wrapper.file.getvalue(), b"test1")
wrapper = FileWrapper(BytesIO())
# BytesIO() cannot accept unicode, so this will
# cause an exception to be thrown which will be
# handled by FileWrapper.handle_exception().
wrapper.write(u"stuff")
self.assertNotEqual(wrapper.file.getvalue(), u"stuff")
def test_writeSequence(self):
"""
L{twisted.internet.protocol.FileWrapper.writeSequence}
"""
wrapper = FileWrapper(BytesIO())
wrapper.writeSequence([b"test1", b"test2"])
self.assertEqual(wrapper.file.getvalue(), b"test1test2")
wrapper = FileWrapper(BytesIO())
if _PY3:
# In Python 3, b"".join([u"a", u"b"]) will raise a TypeError
self.assertRaises(TypeError,
wrapper.writeSequence,
[u"test3", u"test4"])
else:
# In Python 2, b"".join([u"a", u"b"])
# will give u"ab", but writing unicode to BytesIO
# will throw an exception which will be caught
# and ignored by FileWrapper.handle_exception()
wrapper.writeSequence([u"test3", u"test4"])
self.assertTrue(len(wrapper.file.getvalue()) == 0)

View file

@ -0,0 +1,596 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IHostnameResolver} and their interactions with
reactor implementations.
"""
from __future__ import division, absolute_import
__metaclass__ = type
from collections import defaultdict
from socket import (
getaddrinfo, gaierror, EAI_NONAME, AF_INET, AF_INET6, AF_UNSPEC,
SOCK_STREAM, SOCK_DGRAM, IPPROTO_TCP
)
from threading import local, Lock
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.internet.interfaces import (
IResolutionReceiver, IResolverSimple, IReactorPluggableNameResolver,
IHostnameResolver,
)
from twisted.trial.unittest import (
SynchronousTestCase as UnitTest
)
from twisted.python.threadpool import ThreadPool
from twisted._threads import createMemoryWorker, Team, LockWorker
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet._resolver import (
GAIResolver, SimpleResolverComplexifier, ComplexResolverSimplifier
)
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.base import (
PluggableResolverMixin,
ReactorBase,
)
class DeterministicThreadPool(ThreadPool, object):
"""
Create a deterministic L{ThreadPool} object.
"""
def __init__(self, team):
"""
Create a L{DeterministicThreadPool} from a L{Team}.
"""
self.min = 1
self.max = 1
self.name = None
self.threads = []
self._team = team
def deterministicPool():
"""
Create a deterministic threadpool.
    @return: 2-tuple of a L{ThreadPool} and a 0-argument C{work} callable;
        each call to C{work} performs one unit of queued work.
"""
worker, doer = createMemoryWorker()
return (
DeterministicThreadPool(Team(LockWorker(Lock(), local()),
(lambda: worker), lambda: None)),
doer
)
def deterministicReactorThreads():
"""
Create a deterministic L{IReactorThreads}
@return: a 2-tuple consisting of an L{IReactorThreads}-like object and a
0-argument callable that will perform one unit of work invoked via that
object's C{callFromThread} method.
"""
worker, doer = createMemoryWorker()
class CFT(object):
def callFromThread(self, f, *a, **k):
worker.do(lambda: f(*a, **k))
return CFT(), doer
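# A tiny usage sketch of the memory-worker pattern shared by the two helpers
# above: work handed to the worker is only queued, and each call to the
# returned "perform" callable runs exactly one queued unit of work
# synchronously.
_worker, _perform = createMemoryWorker()
_ran = []
_worker.do(lambda: _ran.append("done"))
assert _ran == []      # nothing has run yet
_perform()             # run one queued unit of work
assert _ran == ["done"]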
class FakeAddrInfoGetter(object):
"""
Test object implementing getaddrinfo.
"""
def __init__(self):
"""
Create a L{FakeAddrInfoGetter}.
"""
self.calls = []
self.results = defaultdict(list)
def getaddrinfo(self, host, port, family=0, socktype=0, proto=0, flags=0):
"""
Mock for L{socket.getaddrinfo}.
@param host: see L{socket.getaddrinfo}
@param port: see L{socket.getaddrinfo}
@param family: see L{socket.getaddrinfo}
@param socktype: see L{socket.getaddrinfo}
@param proto: see L{socket.getaddrinfo}
@param flags: see L{socket.getaddrinfo}
        @return: see L{socket.getaddrinfo}
"""
self.calls.append((host, port, family, socktype, proto, flags))
results = self.results[host]
if results:
return results
else:
raise gaierror(EAI_NONAME,
'nodename nor servname provided, or not known')
def addResultForHost(self, host, sockaddr, family=AF_INET,
socktype=SOCK_STREAM, proto=IPPROTO_TCP,
canonname=b""):
"""
Add a result for a given hostname. When this hostname is resolved, the
result will be a L{list} of all results C{addResultForHost} has been
called with using that hostname so far.
@param host: The hostname to give this result for. This will be the
next result from L{FakeAddrInfoGetter.getaddrinfo} when passed this
host.
        @type host: native L{str}
@param sockaddr: The resulting socket address; should be a 2-tuple for
IPv4 or a 4-tuple for IPv6.
@param family: An C{AF_*} constant that will be returned from
C{getaddrinfo}.
@param socktype: A C{SOCK_*} constant that will be returned from
C{getaddrinfo}.
@param proto: An C{IPPROTO_*} constant that will be returned from
C{getaddrinfo}.
@param canonname: A canonical name that will be returned from
C{getaddrinfo}.
@type canonname: native L{str}
"""
self.results[host].append(
(family, socktype, proto, canonname, sockaddr)
)
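# A short usage sketch of the fake above (the hostname and address are made
# up): results follow the real getaddrinfo() shape, a list of
# (family, socktype, proto, canonname, sockaddr) 5-tuples, where sockaddr is a
# 2-tuple for IPv4 and a 4-tuple for IPv6.
_fake = FakeAddrInfoGetter()
_fake.addResultForHost(u"sketch.invalid", ("10.0.0.1", 0))
assert _fake.getaddrinfo(u"sketch.invalid", 0) == [
    (AF_INET, SOCK_STREAM, IPPROTO_TCP, b"", ("10.0.0.1", 0))
]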
@implementer(IResolutionReceiver)
class ResultHolder(object):
"""
A resolution receiver which holds onto the results it received.
"""
_started = False
_ended = False
def __init__(self, testCase):
"""
Create a L{ResultHolder} with a L{UnitTest}.
"""
self._testCase = testCase
def resolutionBegan(self, hostResolution):
"""
Hostname resolution began.
@param hostResolution: see L{IResolutionReceiver}
"""
self._started = True
self._resolution = hostResolution
self._addresses = []
def addressResolved(self, address):
"""
An address was resolved.
@param address: see L{IResolutionReceiver}
"""
self._addresses.append(address)
def resolutionComplete(self):
"""
Hostname resolution is complete.
"""
self._ended = True
class HelperTests(UnitTest):
"""
Tests for error cases of helpers used in this module.
"""
def test_logErrorsInThreads(self):
"""
L{DeterministicThreadPool} will log any exceptions that its "thread"
workers encounter.
"""
self.pool, self.doThreadWork = deterministicPool()
def divideByZero():
return 1 / 0
self.pool.callInThread(divideByZero)
self.doThreadWork()
self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)
class HostnameResolutionTests(UnitTest):
"""
Tests for hostname resolution.
"""
def setUp(self):
"""
Set up a L{GAIResolver}.
"""
self.pool, self.doThreadWork = deterministicPool()
self.reactor, self.doReactorWork = deterministicReactorThreads()
self.getter = FakeAddrInfoGetter()
self.resolver = GAIResolver(self.reactor, lambda: self.pool,
self.getter.getaddrinfo)
def test_resolveOneHost(self):
"""
Resolving an individual hostname that results in one address from
getaddrinfo results in a single call each to C{resolutionBegan},
C{addressResolved}, and C{resolutionComplete}.
"""
receiver = ResultHolder(self)
self.getter.addResultForHost(u"sample.example.com", ("4.3.2.1", 0))
resolution = self.resolver.resolveHostName(receiver,
u"sample.example.com")
self.assertIs(receiver._resolution, resolution)
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.doThreadWork()
self.doReactorWork()
self.assertEqual(receiver._ended, True)
self.assertEqual(receiver._addresses,
[IPv4Address('TCP', '4.3.2.1', 0)])
def test_resolveOneIPv6Host(self):
"""
Resolving an individual hostname that results in one address from
getaddrinfo results in a single call each to C{resolutionBegan},
C{addressResolved}, and C{resolutionComplete}; C{addressResolved} will
receive an L{IPv6Address}.
"""
receiver = ResultHolder(self)
flowInfo = 1
scopeID = 2
self.getter.addResultForHost(u"sample.example.com",
("::1", 0, flowInfo, scopeID),
family=AF_INET6)
resolution = self.resolver.resolveHostName(receiver,
u"sample.example.com")
self.assertIs(receiver._resolution, resolution)
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.doThreadWork()
self.doReactorWork()
self.assertEqual(receiver._ended, True)
self.assertEqual(receiver._addresses,
[IPv6Address('TCP', '::1', 0, flowInfo, scopeID)])
def test_gaierror(self):
"""
Resolving a hostname that results in C{getaddrinfo} raising a
L{gaierror} will result in the L{IResolutionReceiver} receiving a call
to C{resolutionComplete} with no C{addressResolved} calls in between;
no failure is logged.
"""
receiver = ResultHolder(self)
resolution = self.resolver.resolveHostName(receiver,
u"sample.example.com")
self.assertIs(receiver._resolution, resolution)
self.doThreadWork()
self.doReactorWork()
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, True)
self.assertEqual(receiver._addresses, [])
def _resolveOnlyTest(self, addrTypes, expectedAF):
"""
Verify that the given set of address types results in the given C{AF_}
constant being passed to C{getaddrinfo}.
@param addrTypes: iterable of L{IAddress} implementers
@param expectedAF: an C{AF_*} constant
"""
receiver = ResultHolder(self)
resolution = self.resolver.resolveHostName(
receiver, u"sample.example.com", addressTypes=addrTypes
)
self.assertIs(receiver._resolution, resolution)
self.doThreadWork()
self.doReactorWork()
host, port, family, socktype, proto, flags = self.getter.calls[0]
self.assertEqual(family, expectedAF)
def test_resolveOnlyIPv4(self):
"""
When passed an C{addressTypes} parameter containing only
L{IPv4Address}, L{GAIResolver} will pass C{AF_INET} to C{getaddrinfo}.
"""
self._resolveOnlyTest([IPv4Address], AF_INET)
def test_resolveOnlyIPv6(self):
"""
When passed an C{addressTypes} parameter containing only
L{IPv6Address}, L{GAIResolver} will pass C{AF_INET6} to C{getaddrinfo}.
"""
self._resolveOnlyTest([IPv6Address], AF_INET6)
def test_resolveBoth(self):
"""
When passed an C{addressTypes} parameter containing both L{IPv4Address}
and L{IPv6Address} (or the default of C{None}, which carries the same
meaning), L{GAIResolver} will pass C{AF_UNSPEC} to C{getaddrinfo}.
"""
self._resolveOnlyTest([IPv4Address, IPv6Address], AF_UNSPEC)
self._resolveOnlyTest(None, AF_UNSPEC)
def test_transportSemanticsToSocketType(self):
"""
        When passed a C{transportSemantics} parameter, C{'TCP'} (the value
present in L{IPv4Address.type} to indicate a stream transport) maps to
C{SOCK_STREAM} and C{'UDP'} maps to C{SOCK_DGRAM}.
"""
receiver = ResultHolder(self)
self.resolver.resolveHostName(receiver, u"example.com",
transportSemantics='TCP')
receiver2 = ResultHolder(self)
self.resolver.resolveHostName(receiver2, u"example.com",
transportSemantics='UDP')
self.doThreadWork()
self.doReactorWork()
self.doThreadWork()
self.doReactorWork()
host, port, family, socktypeT, proto, flags = self.getter.calls[0]
host, port, family, socktypeU, proto, flags = self.getter.calls[1]
self.assertEqual(socktypeT, SOCK_STREAM)
self.assertEqual(socktypeU, SOCK_DGRAM)
def test_socketTypeToAddressType(self):
"""
        When L{GAIResolver} receives a C{SOCK_STREAM} result from
        C{getaddrinfo}, it returns a C{'TCP'} L{IPv4Address} or L{IPv6Address};
        if it receives C{SOCK_DGRAM} then it returns a C{'UDP'} type of same.
"""
receiver = ResultHolder(self)
flowInfo = 1
scopeID = 2
for socktype in SOCK_STREAM, SOCK_DGRAM:
self.getter.addResultForHost(
"example.com", ("::1", 0, flowInfo, scopeID), family=AF_INET6,
socktype=socktype
)
self.getter.addResultForHost(
"example.com", ("127.0.0.3", 0), family=AF_INET,
socktype=socktype
)
self.resolver.resolveHostName(receiver, u"example.com")
self.doThreadWork()
self.doReactorWork()
stream4, stream6, dgram4, dgram6 = receiver._addresses
self.assertEqual(stream4.type, 'TCP')
self.assertEqual(stream6.type, 'TCP')
self.assertEqual(dgram4.type, 'UDP')
self.assertEqual(dgram6.type, 'UDP')
@implementer(IResolverSimple)
class SillyResolverSimple(object):
"""
Trivial implementation of L{IResolverSimple}
"""
def __init__(self):
"""
Create a L{SillyResolverSimple} with a queue of requests it is working
on.
"""
self._requests = []
def getHostByName(self, name, timeout=()):
"""
Implement L{IResolverSimple.getHostByName}.
@param name: see L{IResolverSimple.getHostByName}.
@param timeout: see L{IResolverSimple.getHostByName}.
@return: see L{IResolverSimple.getHostByName}.
"""
self._requests.append(Deferred())
return self._requests[-1]
class LegacyCompatibilityTests(UnitTest, object):
"""
Older applications may supply an object to the reactor via
C{installResolver} that only provides L{IResolverSimple}.
L{SimpleResolverComplexifier} is a wrapper for an L{IResolverSimple}.
"""
def test_success(self):
"""
        L{SimpleResolverComplexifier} translates a successful result from
        L{IResolverSimple.getHostByName} into a call to
        L{IResolutionReceiver.addressResolved}.
"""
simple = SillyResolverSimple()
complex = SimpleResolverComplexifier(simple)
receiver = ResultHolder(self)
self.assertEqual(receiver._started, False)
complex.resolveHostName(receiver, u"example.com")
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.assertEqual(receiver._addresses, [])
simple._requests[0].callback("192.168.1.1")
self.assertEqual(receiver._addresses,
[IPv4Address('TCP', '192.168.1.1', 0)])
self.assertEqual(receiver._ended, True)
def test_failure(self):
"""
L{SimpleResolverComplexifier} translates a known error result from
        L{IResolverSimple.getHostByName} into an empty result.
"""
simple = SillyResolverSimple()
complex = SimpleResolverComplexifier(simple)
receiver = ResultHolder(self)
self.assertEqual(receiver._started, False)
complex.resolveHostName(receiver, u"example.com")
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.assertEqual(receiver._addresses, [])
simple._requests[0].errback(DNSLookupError("nope"))
self.assertEqual(receiver._ended, True)
self.assertEqual(receiver._addresses, [])
def test_error(self):
"""
L{SimpleResolverComplexifier} translates an unknown error result from
        L{IResolverSimple.getHostByName} into an empty result and a logged
error.
"""
simple = SillyResolverSimple()
complex = SimpleResolverComplexifier(simple)
receiver = ResultHolder(self)
self.assertEqual(receiver._started, False)
complex.resolveHostName(receiver, u"example.com")
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.assertEqual(receiver._addresses, [])
simple._requests[0].errback(ZeroDivisionError("zow"))
self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)
self.assertEqual(receiver._ended, True)
self.assertEqual(receiver._addresses, [])
def test_simplifier(self):
"""
L{ComplexResolverSimplifier} translates an L{IHostnameResolver} into an
L{IResolverSimple} for applications that still expect the old
interfaces to be in place.
"""
self.pool, self.doThreadWork = deterministicPool()
self.reactor, self.doReactorWork = deterministicReactorThreads()
self.getter = FakeAddrInfoGetter()
self.resolver = GAIResolver(self.reactor, lambda: self.pool,
self.getter.getaddrinfo)
simpleResolver = ComplexResolverSimplifier(self.resolver)
self.getter.addResultForHost('example.com', ('192.168.3.4', 4321))
success = simpleResolver.getHostByName('example.com')
failure = simpleResolver.getHostByName('nx.example.com')
self.doThreadWork()
self.doReactorWork()
self.doThreadWork()
self.doReactorWork()
self.assertEqual(self.failureResultOf(failure).type, DNSLookupError)
self.assertEqual(self.successResultOf(success), '192.168.3.4')
def test_portNumber(self):
"""
L{SimpleResolverComplexifier} preserves the C{port} argument passed to
C{resolveHostName} in its returned addresses.
"""
simple = SillyResolverSimple()
complex = SimpleResolverComplexifier(simple)
receiver = ResultHolder(self)
complex.resolveHostName(receiver, u"example.com", 4321)
self.assertEqual(receiver._started, True)
self.assertEqual(receiver._ended, False)
self.assertEqual(receiver._addresses, [])
simple._requests[0].callback("192.168.1.1")
self.assertEqual(receiver._addresses,
[IPv4Address('TCP', '192.168.1.1', 4321)])
self.assertEqual(receiver._ended, True)
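# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the two adapters tested above, reusing the
# SimpleResolverComplexifier, ComplexResolverSimplifier and GAIResolver names
# this module already imports.  The receiver class below is hypothetical.
def _exampleResolverAdapters(reactor):
    from zope.interface import implementer
    from twisted.internet.interfaces import IResolutionReceiver

    @implementer(IResolutionReceiver)
    class CollectingReceiver(object):
        # Collects addresses as the resolution progresses.
        def resolutionBegan(self, resolutionInProgress):
            self.addresses = []
        def addressResolved(self, address):
            self.addresses.append(address)
        def resolutionComplete(self):
            pass

    # Old -> new: wrap an IResolverSimple so resolveHostName() can be used.
    wrapped = SimpleResolverComplexifier(SillyResolverSimple())
    wrapped.resolveHostName(CollectingReceiver(), u"example.com", 80)

    # New -> old: wrap an IHostnameResolver so getHostByName() can be used.
    simplified = ComplexResolverSimplifier(GAIResolver(reactor))
    return simplified.getHostByName("example.com")  # Deferred firing with str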
class JustEnoughReactor(ReactorBase):
"""
Just enough subclass implementation to be a valid L{ReactorBase} subclass.
"""
def installWaker(self):
"""
Do nothing.
"""
class ReactorInstallationTests(UnitTest, object):
"""
Tests for installing old and new resolvers onto a
L{PluggableResolverMixin} and L{ReactorBase} (from which all of Twisted's
reactor implementations derive).
"""
def test_interfaceCompliance(self):
"""
L{PluggableResolverMixin} (and its subclasses) implement both
L{IReactorPluggableNameResolver} and L{IReactorPluggableResolver}.
"""
reactor = PluggableResolverMixin()
verifyObject(IReactorPluggableNameResolver, reactor)
verifyObject(IResolverSimple, reactor.resolver)
verifyObject(IHostnameResolver, reactor.nameResolver)
def test_installingOldStyleResolver(self):
"""
L{PluggableResolverMixin} will wrap an L{IResolverSimple} in a
complexifier.
"""
reactor = PluggableResolverMixin()
it = SillyResolverSimple()
verifyObject(IResolverSimple, reactor.installResolver(it))
self.assertIsInstance(reactor.nameResolver, SimpleResolverComplexifier)
self.assertIs(reactor.nameResolver._simpleResolver, it)
def test_defaultToGAIResolver(self):
"""
L{ReactorBase} defaults to using a L{GAIResolver}.
"""
reactor = JustEnoughReactor()
self.assertIsInstance(reactor.nameResolver, GAIResolver)
self.assertIs(reactor.nameResolver._getaddrinfo, getaddrinfo)
self.assertIsInstance(reactor.resolver, ComplexResolverSimplifier)
self.assertIs(reactor.nameResolver._reactor, reactor)
self.assertIs(reactor.resolver._nameResolver, reactor.nameResolver)
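# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the pluggable-resolver behaviour verified
# above, reusing PluggableResolverMixin and SillyResolverSimple from this
# module.
def _exampleInstallResolver():
    reactor = PluggableResolverMixin()
    # installResolver returns the previously installed IResolverSimple; the
    # new simple resolver is wrapped so nameResolver still provides
    # IHostnameResolver.
    previous = reactor.installResolver(SillyResolverSimple())
    return previous, reactor.nameResolver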

View file

@ -0,0 +1,72 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.serialport}.
"""
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet.protocol import Protocol
from twisted.internet.error import ConnectionDone
try:
from twisted.internet import serialport
except ImportError:
serialport = None
class DoNothing(object):
"""
Object with methods that do nothing.
"""
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, attr):
return lambda *args, **kwargs: None
class SerialPortTests(unittest.TestCase):
"""
Minimal testing for Twisted's serial port support.
See ticket #2462 for the eventual full test suite.
"""
if serialport is None:
skip = "Serial port support is not available."
def test_connectionMadeLost(self):
"""
C{connectionMade} and C{connectionLost} are called on the protocol by
the C{SerialPort}.
"""
# Serial port that doesn't actually connect to anything:
class DummySerialPort(serialport.SerialPort):
_serialFactory = DoNothing
def _finishPortSetup(self):
pass # override default win32 actions
events = []
class SerialProtocol(Protocol):
def connectionMade(self):
events.append("connectionMade")
def connectionLost(self, reason):
events.append(("connectionLost", reason))
# Creation of port should result in connectionMade call:
port = DummySerialPort(SerialProtocol(), "", reactor=DoNothing())
self.assertEqual(events, ["connectionMade"])
# Simulate reactor calling connectionLost on the SerialPort:
f = Failure(ConnectionDone())
port.connectionLost(f)
self.assertEqual(events, ["connectionMade", ("connectionLost", f)])

View file

@ -0,0 +1,125 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""
from __future__ import division, absolute_import
import os, signal, errno
from twisted.python.runtime import platformType
from twisted.python.log import msg
from twisted.trial.unittest import SynchronousTestCase
if platformType == "posix":
from twisted.internet.fdesc import setNonBlocking
from twisted.internet._signals import installHandler, isDefaultHandler
else:
skip = "These tests can only run on POSIX platforms."
class SetWakeupSIGCHLDTests(SynchronousTestCase):
"""
Tests for the L{signal.set_wakeup_fd} implementation of the
L{installHandler} and L{isDefaultHandler} APIs.
"""
def pipe(self):
"""
Create a non-blocking pipe which will be closed after the currently
running test.
"""
read, write = os.pipe()
self.addCleanup(os.close, read)
self.addCleanup(os.close, write)
setNonBlocking(read)
setNonBlocking(write)
return read, write
def setUp(self):
"""
Save the current SIGCHLD handler as reported by L{signal.signal} and
the current file descriptor registered with L{installHandler}.
"""
handler = signal.getsignal(signal.SIGCHLD)
if handler != signal.SIG_DFL:
self.signalModuleHandler = handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
self.signalModuleHandler = None
self.oldFD = installHandler(-1)
if self.signalModuleHandler is not None and self.oldFD != -1:
msg("Previous test didn't clean up after its SIGCHLD setup: %r %r"
% (self.signalModuleHandler, self.oldFD))
def tearDown(self):
"""
Restore whatever signal handler was present when setUp ran.
"""
# If tests set up any kind of handlers, clear them out.
installHandler(-1)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Now restore whatever the setup was before the test ran.
if self.signalModuleHandler is not None:
signal.signal(signal.SIGCHLD, self.signalModuleHandler)
elif self.oldFD != -1:
installHandler(self.oldFD)
def test_isDefaultHandler(self):
"""
L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
false otherwise.
"""
self.assertTrue(isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
self.assertFalse(isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
self.assertTrue(isDefaultHandler())
signal.signal(signal.SIGCHLD, lambda *args: None)
self.assertFalse(isDefaultHandler())
def test_returnOldFD(self):
"""
L{installHandler} returns the previously registered file descriptor.
"""
read, write = self.pipe()
oldFD = installHandler(write)
self.assertEqual(installHandler(oldFD), write)
def test_uninstallHandler(self):
"""
C{installHandler(-1)} removes the SIGCHLD handler completely.
"""
read, write = self.pipe()
self.assertTrue(isDefaultHandler())
installHandler(write)
self.assertFalse(isDefaultHandler())
installHandler(-1)
self.assertTrue(isDefaultHandler())
def test_installHandler(self):
"""
The file descriptor passed to L{installHandler} has a byte written to
it when SIGCHLD is delivered to the process.
"""
read, write = self.pipe()
installHandler(write)
exc = self.assertRaises(OSError, os.read, read, 1)
self.assertEqual(exc.errno, errno.EAGAIN)
os.kill(os.getpid(), signal.SIGCHLD)
self.assertEqual(len(os.read(read, 5)), 1)
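# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the wakeup-pipe pattern the tests above
# exercise: one byte appears on the read end of the pipe for each SIGCHLD
# delivered while the write end is installed.
def _exampleSIGCHLDWakeup():
    read, write = os.pipe()
    setNonBlocking(read)
    setNonBlocking(write)
    previous = installHandler(write)  # previously installed fd, or -1
    try:
        os.kill(os.getpid(), signal.SIGCHLD)
        return os.read(read, 1)       # the single wakeup byte
    finally:
        installHandler(previous)
        os.close(read)
        os.close(write)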

View file

@ -0,0 +1,273 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorSocket}.
Generally only tests for failure cases are found here. Success cases for
this interface are tested elsewhere. For example, the success case for
I{AF_INET} is in L{twisted.internet.test.test_tcp}, since that case should
behave exactly the same as L{IReactorTCP.listenTCP}.
"""
import errno, socket
from zope.interface import verify
from twisted.python.log import err
from twisted.internet.interfaces import IReactorSocket
from twisted.internet.error import UnsupportedAddressFamily
from twisted.internet.protocol import DatagramProtocol, ServerFactory
from twisted.internet.test.reactormixins import (
ReactorBuilder, needsRunningReactor)
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
class IReactorSocketVerificationTestsBuilder(ReactorBuilder):
"""
Builder for testing L{IReactorSocket} implementations for required
methods and method signatures.
L{ReactorBuilder} already runs L{IReactorSocket.providedBy} to
ensure that these tests will only be run on reactor classes that
claim to implement L{IReactorSocket}.
These tests ensure that reactors which claim to provide the
L{IReactorSocket} interface actually have all the required methods
and that those methods have the expected number of arguments.
These tests will be skipped for reactors which do not claim to
provide L{IReactorSocket}.
"""
requiredInterfaces = [IReactorSocket]
def test_provider(self):
"""
The reactor instance returned by C{buildReactor} provides
L{IReactorSocket}.
"""
reactor = self.buildReactor()
self.assertTrue(
verify.verifyObject(IReactorSocket, reactor))
class AdoptStreamPortErrorsTestsBuilder(ReactorBuilder):
"""
Builder for testing L{IReactorSocket.adoptStreamPort} implementations.
Generally only tests for failure cases are found here. Success cases for
this interface are tested elsewhere. For example, the success case for
I{AF_INET} is in L{twisted.internet.test.test_tcp}, since that case should
behave exactly the same as L{IReactorTCP.listenTCP}.
"""
requiredInterfaces = [IReactorSocket]
def test_invalidDescriptor(self):
"""
An implementation of L{IReactorSocket.adoptStreamPort} raises
L{socket.error} if passed an integer which is not associated with a
socket.
"""
reactor = self.buildReactor()
probe = socket.socket()
fileno = probe.fileno()
probe.close()
exc = self.assertRaises(
socket.error,
reactor.adoptStreamPort, fileno, socket.AF_INET, ServerFactory())
if platform.isWindows() and _PY3:
self.assertEqual(exc.args[0], errno.WSAENOTSOCK)
else:
self.assertEqual(exc.args[0], errno.EBADF)
def test_invalidAddressFamily(self):
"""
An implementation of L{IReactorSocket.adoptStreamPort} raises
L{UnsupportedAddressFamily} if passed an address family it does not
support.
"""
reactor = self.buildReactor()
port = socket.socket()
port.bind(("127.0.0.1", 0))
port.listen(1)
self.addCleanup(port.close)
arbitrary = 2 ** 16 + 7
self.assertRaises(
UnsupportedAddressFamily,
reactor.adoptStreamPort, port.fileno(), arbitrary, ServerFactory())
def test_stopOnlyCloses(self):
"""
When the L{IListeningPort} returned by
L{IReactorSocket.adoptStreamPort} is stopped using
C{stopListening}, the underlying socket is closed but not
shutdown. This allows another process which still has a
reference to it to continue accepting connections over it.
"""
reactor = self.buildReactor()
portSocket = socket.socket()
self.addCleanup(portSocket.close)
portSocket.bind(("127.0.0.1", 0))
portSocket.listen(1)
portSocket.setblocking(False)
# The file descriptor is duplicated by adoptStreamPort
port = reactor.adoptStreamPort(
portSocket.fileno(), portSocket.family, ServerFactory())
d = port.stopListening()
def stopped(ignored):
# Should still be possible to accept a connection on
# portSocket. If it was shutdown, the exception would be
# EINVAL instead.
exc = self.assertRaises(socket.error, portSocket.accept)
if platform.isWindows() and _PY3:
self.assertEqual(exc.args[0], errno.WSAEWOULDBLOCK)
else:
self.assertEqual(exc.args[0], errno.EAGAIN)
d.addCallback(stopped)
d.addErrback(err, "Failed to accept on original port.")
needsRunningReactor(
reactor,
lambda: d.addCallback(lambda ignored: reactor.stop()))
reactor.run()
class AdoptStreamConnectionErrorsTestsBuilder(ReactorBuilder):
"""
Builder for testing L{IReactorSocket.adoptStreamConnection}
implementations.
Generally only tests for failure cases are found here. Success cases for
this interface are tested elsewhere. For example, the success case for
I{AF_INET} is in L{twisted.internet.test.test_tcp}, since that case should
behave exactly the same as L{IReactorTCP.listenTCP}.
"""
requiredInterfaces = [IReactorSocket]
def test_invalidAddressFamily(self):
"""
An implementation of L{IReactorSocket.adoptStreamConnection} raises
L{UnsupportedAddressFamily} if passed an address family it does not
support.
"""
reactor = self.buildReactor()
connection = socket.socket()
self.addCleanup(connection.close)
arbitrary = 2 ** 16 + 7
self.assertRaises(
UnsupportedAddressFamily,
reactor.adoptStreamConnection, connection.fileno(), arbitrary,
ServerFactory())
class AdoptDatagramPortErrorsTestsBuilder(ReactorBuilder):
"""
Builder for testing L{IReactorSocket.adoptDatagramPort} implementations.
"""
requiredInterfaces = [IReactorSocket]
def test_invalidDescriptor(self):
"""
An implementation of L{IReactorSocket.adoptDatagramPort} raises
L{socket.error} if passed an integer which is not associated with a
socket.
"""
reactor = self.buildReactor()
probe = socket.socket()
fileno = probe.fileno()
probe.close()
exc = self.assertRaises(
socket.error,
reactor.adoptDatagramPort, fileno, socket.AF_INET,
DatagramProtocol())
if platform.isWindows() and _PY3:
self.assertEqual(exc.args[0], errno.WSAENOTSOCK)
else:
self.assertEqual(exc.args[0], errno.EBADF)
def test_invalidAddressFamily(self):
"""
An implementation of L{IReactorSocket.adoptDatagramPort} raises
L{UnsupportedAddressFamily} if passed an address family it does not
support.
"""
reactor = self.buildReactor()
port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(port.close)
arbitrary = 2 ** 16 + 7
self.assertRaises(
UnsupportedAddressFamily,
reactor.adoptDatagramPort, port.fileno(), arbitrary,
DatagramProtocol())
def test_stopOnlyCloses(self):
"""
When the L{IListeningPort} returned by
L{IReactorSocket.adoptDatagramPort} is stopped using
C{stopListening}, the underlying socket is closed but not
shutdown. This allows another process which still has a
reference to it to continue reading and writing to it.
"""
reactor = self.buildReactor()
portSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(portSocket.close)
portSocket.bind(("127.0.0.1", 0))
portSocket.setblocking(False)
# The file descriptor is duplicated by adoptDatagramPort
port = reactor.adoptDatagramPort(
portSocket.fileno(), portSocket.family, DatagramProtocol())
d = port.stopListening()
def stopped(ignored):
# Should still be possible to recv on portSocket. If
# it was shutdown, the exception would be EINVAL instead.
exc = self.assertRaises(socket.error, portSocket.recvfrom, 1)
if platform.isWindows() and _PY3:
self.assertEqual(exc.args[0], errno.WSAEWOULDBLOCK)
else:
self.assertEqual(exc.args[0], errno.EAGAIN)
d.addCallback(stopped)
d.addErrback(err, "Failed to read on original port.")
needsRunningReactor(
reactor,
lambda: d.addCallback(lambda ignored: reactor.stop()))
reactor.run()
globals().update(IReactorSocketVerificationTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamPortErrorsTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamConnectionErrorsTestsBuilder.makeTestCaseClasses())
globals().update(AdoptDatagramPortErrorsTestsBuilder.makeTestCaseClasses())
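# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged success-path example of IReactorSocket.adoptStreamPort,
# mirroring the "stop only closes the duplicate" behaviour tested above.
def _exampleAdoptListeningSocket(reactor, factory):
    # Bind and listen ourselves, then hand the descriptor to the reactor.
    portSocket = socket.socket()
    portSocket.bind(("127.0.0.1", 0))
    portSocket.listen(5)
    portSocket.setblocking(False)
    # adoptStreamPort duplicates the descriptor, so our copy can be closed.
    listeningPort = reactor.adoptStreamPort(
        portSocket.fileno(), portSocket.family, factory)
    portSocket.close()
    return listeningPort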

View file

@ -0,0 +1,199 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
"""
from __future__ import absolute_import, division
from twisted.python.runtime import platform
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.protocol import Protocol
if not platform.isWindows():
from twisted.internet.stdio import StandardIO
class StdioFilesTests(ReactorBuilder):
"""
L{StandardIO} supports reading and writing to filesystem files.
"""
def setUp(self):
path = self.mktemp()
open(path, "wb").close()
self.extraFile = open(path, "rb+")
self.addCleanup(self.extraFile.close)
def test_addReader(self):
"""
Adding a filesystem file reader to a reactor will make sure it is
polled.
"""
reactor = self.buildReactor()
class DataProtocol(Protocol):
data = b""
def dataReceived(self, data):
self.data += data
# It'd be better to stop reactor on connectionLost, but that
# fails on FreeBSD, probably due to
# http://bugs.python.org/issue9591:
if self.data == b"hello!":
reactor.stop()
path = self.mktemp()
with open(path, "wb") as f:
f.write(b"hello!")
with open(path, "rb") as f:
# Read bytes from a file, deliver them to a protocol instance:
protocol = DataProtocol()
StandardIO(protocol, stdin=f.fileno(),
stdout=self.extraFile.fileno(),
reactor=reactor)
self.runReactor(reactor)
self.assertEqual(protocol.data, b"hello!")
def test_addWriter(self):
"""
Adding a filesystem file writer to a reactor will make sure it is
polled.
"""
reactor = self.buildReactor()
class DisconnectProtocol(Protocol):
def connectionLost(self, reason):
reactor.stop()
path = self.mktemp()
with open(path, "wb") as f:
# Write bytes to a transport, hopefully have them written to a
# file:
protocol = DisconnectProtocol()
StandardIO(protocol, stdout=f.fileno(),
stdin=self.extraFile.fileno(), reactor=reactor)
protocol.transport.write(b"hello")
protocol.transport.write(b", world")
protocol.transport.loseConnection()
self.runReactor(reactor)
with open(path, "rb") as f:
self.assertEqual(f.read(), b"hello, world")
def test_removeReader(self):
"""
Removing a filesystem file reader from a reactor will make sure it is
no longer polled.
"""
reactor = self.buildReactor()
self.addCleanup(self.unbuildReactor, reactor)
path = self.mktemp()
open(path, "wb").close()
with open(path, "rb") as f:
# Have the reader added:
stdio = StandardIO(Protocol(), stdin=f.fileno(),
stdout=self.extraFile.fileno(),
reactor=reactor)
self.assertIn(stdio._reader, reactor.getReaders())
stdio._reader.stopReading()
self.assertNotIn(stdio._reader, reactor.getReaders())
def test_removeWriter(self):
"""
Removing a filesystem file writer from a reactor will make sure it is
no longer polled.
"""
reactor = self.buildReactor()
self.addCleanup(self.unbuildReactor, reactor)
# Cleanup might fail if file is GCed too soon:
self.f = f = open(self.mktemp(), "wb")
# Have the reader added:
protocol = Protocol()
stdio = StandardIO(protocol, stdout=f.fileno(),
stdin=self.extraFile.fileno(),
reactor=reactor)
protocol.transport.write(b"hello")
self.assertIn(stdio._writer, reactor.getWriters())
stdio._writer.stopWriting()
self.assertNotIn(stdio._writer, reactor.getWriters())
def test_removeAll(self):
"""
Calling C{removeAll} on a reactor includes descriptors that are
filesystem files.
"""
reactor = self.buildReactor()
self.addCleanup(self.unbuildReactor, reactor)
path = self.mktemp()
open(path, "wb").close()
# Cleanup might fail if file is GCed too soon:
self.f = f = open(path, "rb")
# Have the reader added:
stdio = StandardIO(Protocol(), stdin=f.fileno(),
stdout=self.extraFile.fileno(), reactor=reactor)
# And then removed:
removed = reactor.removeAll()
self.assertIn(stdio._reader, removed)
self.assertNotIn(stdio._reader, reactor.getReaders())
def test_getReaders(self):
"""
C{reactor.getReaders} includes descriptors that are filesystem files.
"""
reactor = self.buildReactor()
self.addCleanup(self.unbuildReactor, reactor)
path = self.mktemp()
open(path, "wb").close()
# Cleanup might fail if file is GCed too soon:
with open(path, "rb") as f:
# Have the reader added:
stdio = StandardIO(Protocol(), stdin=f.fileno(),
stdout=self.extraFile.fileno(), reactor=reactor)
self.assertIn(stdio._reader, reactor.getReaders())
def test_getWriters(self):
"""
C{reactor.getWriters} includes descriptors that are filesystem files.
"""
reactor = self.buildReactor()
self.addCleanup(self.unbuildReactor, reactor)
# Cleanup might fail if file is GCed too soon:
self.f = f = open(self.mktemp(), "wb")
# Have the reader added:
stdio = StandardIO(Protocol(), stdout=f.fileno(),
stdin=self.extraFile.fileno(), reactor=reactor)
self.assertNotIn(stdio._writer, reactor.getWriters())
stdio._writer.startWriting()
self.assertIn(stdio._writer, reactor.getWriters())
if platform.isWindows():
skip = ("StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
globals().update(StdioFilesTests.makeTestCaseClasses())
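# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of pointing StandardIO at ordinary files instead
# of the process's real stdin/stdout, as the tests above do (POSIX only).
def _exampleStandardIOOnFiles(reactor, protocol, inPath, outPath):
    inFile = open(inPath, "rb")
    outFile = open(outPath, "wb")
    # Bytes read from inPath are delivered to protocol.dataReceived();
    # protocol.transport.write() appends to outPath.
    stdio = StandardIO(protocol, stdin=inFile.fileno(),
                       stdout=outFile.fileno(), reactor=reactor)
    # Keep the file objects alive with the StandardIO instance so the
    # descriptors are not closed by garbage collection.
    return stdio, inFile, outFile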

File diff suppressed because it is too large

View file

@ -0,0 +1,515 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.testing}.
"""
from zope.interface.verify import verifyObject
from twisted.internet.interfaces import (
ITransport,
IPushProducer,
IConsumer,
IReactorTCP,
IReactorSSL,
IReactorUNIX,
IAddress,
IListeningPort,
IConnector
)
from twisted.internet.address import IPv4Address
from twisted.trial.unittest import TestCase
from twisted.internet.testing import (
StringTransport,
MemoryReactor,
RaisingMemoryReactor,
NonStreamingProducer
)
from twisted.internet.protocol import ClientFactory, Factory
from twisted.python.reflect import namedAny
class StringTransportTests(TestCase):
"""
Tests for L{twisted.internet.testing.StringTransport}.
"""
def setUp(self):
self.transport = StringTransport()
def test_interfaces(self):
"""
L{StringTransport} instances provide L{ITransport}, L{IPushProducer},
and L{IConsumer}.
"""
self.assertTrue(verifyObject(ITransport, self.transport))
self.assertTrue(verifyObject(IPushProducer, self.transport))
self.assertTrue(verifyObject(IConsumer, self.transport))
def test_registerProducer(self):
"""
L{StringTransport.registerProducer} records the arguments supplied to
it as instance attributes.
"""
producer = object()
streaming = object()
self.transport.registerProducer(producer, streaming)
self.assertIs(self.transport.producer, producer)
self.assertIs(self.transport.streaming, streaming)
def test_disallowedRegisterProducer(self):
"""
L{StringTransport.registerProducer} raises L{RuntimeError} if a
producer is already registered.
"""
producer = object()
self.transport.registerProducer(producer, True)
self.assertRaises(
RuntimeError, self.transport.registerProducer, object(), False)
self.assertIs(self.transport.producer, producer)
self.assertTrue(self.transport.streaming)
def test_unregisterProducer(self):
"""
L{StringTransport.unregisterProducer} causes the transport to forget
about the registered producer and makes it possible to register a new
one.
"""
oldProducer = object()
newProducer = object()
self.transport.registerProducer(oldProducer, False)
self.transport.unregisterProducer()
self.assertIsNone(self.transport.producer)
self.transport.registerProducer(newProducer, True)
self.assertIs(self.transport.producer, newProducer)
self.assertTrue(self.transport.streaming)
def test_invalidUnregisterProducer(self):
"""
L{StringTransport.unregisterProducer} raises L{RuntimeError} if called
when no producer is registered.
"""
self.assertRaises(RuntimeError, self.transport.unregisterProducer)
def test_initialProducerState(self):
"""
L{StringTransport.producerState} is initially C{'producing'}.
"""
self.assertEqual(self.transport.producerState, 'producing')
def test_pauseProducing(self):
"""
L{StringTransport.pauseProducing} changes the C{producerState} of the
transport to C{'paused'}.
"""
self.transport.pauseProducing()
self.assertEqual(self.transport.producerState, 'paused')
def test_resumeProducing(self):
"""
L{StringTransport.resumeProducing} changes the C{producerState} of the
transport to C{'producing'}.
"""
self.transport.pauseProducing()
self.transport.resumeProducing()
self.assertEqual(self.transport.producerState, 'producing')
def test_stopProducing(self):
"""
L{StringTransport.stopProducing} changes the C{'producerState'} of the
transport to C{'stopped'}.
"""
self.transport.stopProducing()
self.assertEqual(self.transport.producerState, 'stopped')
def test_stoppedTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_stoppedTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_disconnectingTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_disconnectingTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_loseConnectionSetsDisconnecting(self):
"""
L{StringTransport.loseConnection} toggles the C{disconnecting} instance
variable to C{True}.
"""
self.assertFalse(self.transport.disconnecting)
self.transport.loseConnection()
self.assertTrue(self.transport.disconnecting)
def test_specifiedHostAddress(self):
"""
If a host address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getHost}.
"""
address = object()
self.assertIs(StringTransport(address).getHost(), address)
def test_specifiedPeerAddress(self):
"""
If a peer address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getPeer}.
"""
address = object()
self.assertIs(
StringTransport(peerAddress=address).getPeer(), address)
def test_defaultHostAddress(self):
"""
If no host address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getHost}.
"""
address = StringTransport().getHost()
self.assertIsInstance(address, IPv4Address)
def test_defaultPeerAddress(self):
"""
If no peer address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getPeer}.
"""
address = StringTransport().getPeer()
self.assertIsInstance(address, IPv4Address)
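# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the usual StringTransport workflow when
# unit-testing a protocol; EchoExample is purely hypothetical.
def _exampleStringTransportUsage():
    from twisted.internet.protocol import Protocol

    class EchoExample(Protocol):
        # Writes back whatever it receives.
        def dataReceived(self, data):
            self.transport.write(data)

    transport = StringTransport()
    protocol = EchoExample()
    protocol.makeConnection(transport)
    protocol.dataReceived(b"ping")
    return transport.value()  # b"ping"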
class ReactorTests(TestCase):
"""
Tests for L{MemoryReactor} and L{RaisingMemoryReactor}.
"""
def test_memoryReactorProvides(self):
"""
L{MemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
memoryReactor = MemoryReactor()
verifyObject(IReactorTCP, memoryReactor)
verifyObject(IReactorSSL, memoryReactor)
verifyObject(IReactorUNIX, memoryReactor)
def test_raisingReactorProvides(self):
"""
L{RaisingMemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
raisingReactor = RaisingMemoryReactor()
verifyObject(IReactorTCP, raisingReactor)
verifyObject(IReactorSSL, raisingReactor)
verifyObject(IReactorUNIX, raisingReactor)
def test_connectDestination(self):
"""
L{MemoryReactor.connectTCP}, L{MemoryReactor.connectSSL}, and
L{MemoryReactor.connectUNIX} will return an L{IConnector} whose
C{getDestination} method returns an L{IAddress} with attributes which
reflect the values passed.
"""
memoryReactor = MemoryReactor()
for connector in [memoryReactor.connectTCP(
"test.example.com", 8321, ClientFactory()),
memoryReactor.connectSSL(
"test.example.com", 8321, ClientFactory(),
None)]:
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEqual(address.host, "test.example.com")
self.assertEqual(address.port, 8321)
connector = memoryReactor.connectUNIX(b"/fake/path", ClientFactory())
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEqual(address.name, b"/fake/path")
def test_listenDefaultHost(self):
"""
L{MemoryReactor.listenTCP}, L{MemoryReactor.listenSSL} and
L{MemoryReactor.listenUNIX} will return an L{IListeningPort} whose
C{getHost} method returns an L{IAddress}; C{listenTCP} and C{listenSSL}
will have a default host of C{'0.0.0.0'}, and a port that reflects the
value passed, and C{listenUNIX} will have a name that reflects the path
passed.
"""
memoryReactor = MemoryReactor()
for port in [memoryReactor.listenTCP(8242, Factory()),
memoryReactor.listenSSL(8242, Factory(), None)]:
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEqual(address.host, '0.0.0.0')
self.assertEqual(address.port, 8242)
port = memoryReactor.listenUNIX(b"/path/to/socket", Factory())
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEqual(address.name, b"/path/to/socket")
def test_readers(self):
"""
Adding, removing, and listing readers works.
"""
reader = object()
reactor = MemoryReactor()
reactor.addReader(reader)
reactor.addReader(reader)
self.assertEqual(reactor.getReaders(), [reader])
reactor.removeReader(reader)
self.assertEqual(reactor.getReaders(), [])
def test_writers(self):
"""
Adding, removing, and listing writers works.
"""
writer = object()
reactor = MemoryReactor()
reactor.addWriter(writer)
reactor.addWriter(writer)
self.assertEqual(reactor.getWriters(), [writer])
reactor.removeWriter(writer)
self.assertEqual(reactor.getWriters(), [])
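# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of inspecting a MemoryReactor after driving code
# that makes outgoing connections: attempts are recorded on tcpClients rather
# than touching the network.
def _exampleMemoryReactorInspection():
    memoryReactor = MemoryReactor()
    memoryReactor.connectTCP("test.example.com", 8321, ClientFactory())
    host, port, factory = memoryReactor.tcpClients[0][:3]
    return host, port, factory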
class TestConsumer(object):
"""
A very basic test consumer for use with the NonStreamingProducerTests.
"""
def __init__(self):
self.writes = []
self.producer = None
self.producerStreaming = None
def registerProducer(self, producer, streaming):
"""
Registers a single producer with this consumer. Just keeps track of it.
@param producer: The producer to register.
@param streaming: Whether the producer is a streaming one or not.
"""
self.producer = producer
self.producerStreaming = streaming
def unregisterProducer(self):
"""
Forget the producer we had previously registered.
"""
self.producer = None
self.producerStreaming = None
def write(self, data):
"""
Some data was written to the consumer: stores it for later use.
@param data: The data to write.
"""
self.writes.append(data)
class NonStreamingProducerTests(TestCase):
"""
Tests for the L{NonStreamingProducer} to validate behaviour.
"""
def test_producesOnly10Times(self):
"""
When the L{NonStreamingProducer} has resumeProducing called 10 times,
it writes the counter each time and then fails.
"""
consumer = TestConsumer()
producer = NonStreamingProducer(consumer)
consumer.registerProducer(producer, False)
self.assertIs(consumer.producer, producer)
self.assertIs(producer.consumer, consumer)
self.assertFalse(consumer.producerStreaming)
for _ in range(10):
producer.resumeProducing()
# We should have unregistered the producer and printed the 10 results.
expectedWrites = [
b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9'
]
self.assertIsNone(consumer.producer)
self.assertIsNone(consumer.producerStreaming)
self.assertIsNone(producer.consumer)
self.assertEqual(consumer.writes, expectedWrites)
# Another attempt to produce fails.
self.assertRaises(RuntimeError, producer.resumeProducing)
def test_cannotPauseProduction(self):
"""
When the L{NonStreamingProducer} is paused, it raises a
L{RuntimeError}.
"""
consumer = TestConsumer()
producer = NonStreamingProducer(consumer)
consumer.registerProducer(producer, False)
# Produce once, just to be safe.
producer.resumeProducing()
self.assertRaises(RuntimeError, producer.pauseProducing)
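# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of driving NonStreamingProducer by hand, as the
# tests above do: each resumeProducing() call pushes one counter value to the
# registered consumer, up to ten in total.
def _exampleNonStreamingProducer():
    consumer = TestConsumer()
    producer = NonStreamingProducer(consumer)
    consumer.registerProducer(producer, False)
    for _ in range(3):
        producer.resumeProducing()
    return consumer.writes  # [b'0', b'1', b'2']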
class DeprecationTests(TestCase):
"""
Deprecations in L{twisted.test.proto_helpers}.
"""
def helper(self, test, obj):
new_path = 'twisted.internet.testing.{}'.format(obj.__name__)
warnings = self.flushWarnings(
[test])
self.assertEqual(DeprecationWarning, warnings[0]['category'])
self.assertEqual(1, len(warnings))
self.assertIn(new_path, warnings[0]['message'])
self.assertIs(obj, namedAny(new_path))
def test_accumulatingProtocol(self):
from twisted.test.proto_helpers import AccumulatingProtocol
self.helper(self.test_accumulatingProtocol,
AccumulatingProtocol)
def test_lineSendingProtocol(self):
from twisted.test.proto_helpers import LineSendingProtocol
self.helper(self.test_lineSendingProtocol,
LineSendingProtocol)
def test_fakeDatagramTransport(self):
from twisted.test.proto_helpers import FakeDatagramTransport
self.helper(self.test_fakeDatagramTransport,
FakeDatagramTransport)
def test_stringTransport(self):
from twisted.test.proto_helpers import StringTransport
self.helper(self.test_stringTransport,
StringTransport)
def test_stringTransportWithDisconnection(self):
from twisted.test.proto_helpers import (
StringTransportWithDisconnection)
self.helper(self.test_stringTransportWithDisconnection,
StringTransportWithDisconnection)
def test_stringIOWithoutClosing(self):
from twisted.test.proto_helpers import StringIOWithoutClosing
self.helper(self.test_stringIOWithoutClosing,
StringIOWithoutClosing)
def test__fakeConnector(self):
from twisted.test.proto_helpers import _FakeConnector
self.helper(self.test__fakeConnector,
_FakeConnector)
def test__fakePort(self):
from twisted.test.proto_helpers import _FakePort
self.helper(self.test__fakePort,
_FakePort)
def test_memoryReactor(self):
from twisted.test.proto_helpers import MemoryReactor
self.helper(self.test_memoryReactor,
MemoryReactor)
def test_memoryReactorClock(self):
from twisted.test.proto_helpers import MemoryReactorClock
self.helper(self.test_memoryReactorClock,
MemoryReactorClock)
def test_raisingMemoryReactor(self):
from twisted.test.proto_helpers import RaisingMemoryReactor
self.helper(self.test_raisingMemoryReactor,
RaisingMemoryReactor)
def test_nonStreamingProducer(self):
from twisted.test.proto_helpers import NonStreamingProducer
self.helper(self.test_nonStreamingProducer,
NonStreamingProducer)
def test_waitUntilAllDisconnected(self):
from twisted.test.proto_helpers import (
waitUntilAllDisconnected)
self.helper(self.test_waitUntilAllDisconnected,
waitUntilAllDisconnected)
def test_eventLoggingObserver(self):
from twisted.test.proto_helpers import EventLoggingObserver
self.helper(self.test_eventLoggingObserver,
EventLoggingObserver)

View file

@ -0,0 +1,232 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorThreads}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
from weakref import ref
import gc, threading
from twisted.python.threadable import isInIOThread
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.threadpool import ThreadPool
from twisted.internet.interfaces import IReactorThreads
class ThreadTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorThreads}.
"""
requiredInterfaces = (IReactorThreads,)
def test_getThreadPool(self):
"""
C{reactor.getThreadPool()} returns an instance of L{ThreadPool} which
starts when C{reactor.run()} is called and stops before it returns.
"""
state = []
reactor = self.buildReactor()
pool = reactor.getThreadPool()
self.assertIsInstance(pool, ThreadPool)
self.assertFalse(
pool.started, "Pool should not start before reactor.run")
def f():
# Record the state for later assertions
state.append(pool.started)
state.append(pool.joined)
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor, 2)
self.assertTrue(
state[0], "Pool should start after reactor.run")
self.assertFalse(
state[1], "Pool should not be joined before reactor.stop")
self.assertTrue(
pool.joined,
"Pool should be stopped after reactor.run returns")
def test_suggestThreadPoolSize(self):
"""
C{reactor.suggestThreadPoolSize()} sets the maximum size of the reactor
threadpool.
"""
reactor = self.buildReactor()
reactor.suggestThreadPoolSize(17)
pool = reactor.getThreadPool()
self.assertEqual(pool.max, 17)
def test_delayedCallFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from a delayed call is run immediately in the next reactor iteration.
When invoked from the reactor thread, previous implementations of
L{IReactorThreads.callFromThread} would skip the pipe/socket based wake
up step, assuming the reactor would wake up on its own. However, this
resulted in the reactor not noticing an insert into the thread queue at
the right time (in this case, after the thread queue has been processed
for that reactor iteration).
"""
reactor = self.buildReactor()
def threadCall():
reactor.stop()
# Set up the use of callFromThread being tested.
reactor.callLater(0, reactor.callFromThread, threadCall)
before = reactor.seconds()
self.runReactor(reactor, 60)
after = reactor.seconds()
# We specified a timeout of 60 seconds. The timeout code in runReactor
# probably won't actually work, though. If the reactor comes out of
# the event notification API just a little bit early, say after 59.9999
# seconds instead of after 60 seconds, then the queued thread call will
# get processed but the timeout delayed call runReactor sets up won't!
# Then the reactor will stop and runReactor will return without the
# timeout firing. As it turns out, select() and poll() are quite
# likely to return *slightly* earlier than we ask them to, so the
# timeout will rarely happen, even if callFromThread is broken. So,
# instead we'll measure the elapsed time and make sure it's something
# less than about half of the timeout we specified. This is heuristic.
# It assumes that select() won't ever return after 30 seconds when we
# asked it to timeout after 60 seconds. And of course like all
# time-based tests, it's slightly non-deterministic. If the OS doesn't
# schedule this process for 30 seconds, then the test might fail even
# if callFromThread is working.
self.assertTrue(after - before < 30)
def test_callFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from another thread is run in the reactor thread.
"""
reactor = self.buildReactor()
result = []
def threadCall():
result.append(threading.currentThread())
reactor.stop()
reactor.callLater(0, reactor.callInThread,
reactor.callFromThread, threadCall)
self.runReactor(reactor, 5)
self.assertEqual(result, [threading.currentThread()])
def test_stopThreadPool(self):
"""
When the reactor stops, L{ReactorBase._stopThreadPool} drops the
reactor's direct reference to its internal threadpool and removes
the associated startup and shutdown triggers.
This is the case of the thread pool being created before the reactor
is run.
"""
reactor = self.buildReactor()
threadpool = ref(reactor.getThreadPool())
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
gc.collect()
self.assertIsNone(threadpool())
def test_stopThreadPoolWhenStartedAfterReactorRan(self):
"""
We must handle the case of shutting down the thread pool when it was
started after the reactor was run in a special way.
Some implementation background: The thread pool is started with
callWhenRunning, which only returns a system trigger ID when it is
invoked before the reactor is started.
This is the case of the thread pool being created after the reactor
is started.
"""
reactor = self.buildReactor()
threadPoolRefs = []
def acquireThreadPool():
threadPoolRefs.append(ref(reactor.getThreadPool()))
reactor.stop()
reactor.callWhenRunning(acquireThreadPool)
self.runReactor(reactor)
gc.collect()
self.assertIsNone(threadPoolRefs[0]())
def test_cleanUpThreadPoolEvenBeforeReactorIsRun(self):
"""
When the reactor has its shutdown event fired before it is run, the
thread pool is completely destroyed.
For what it's worth, the reason we support this behavior at all is
because Trial does this.
This is the case of the thread pool being created without the reactor
being started at all.
"""
reactor = self.buildReactor()
threadPoolRef = ref(reactor.getThreadPool())
reactor.fireSystemEvent("shutdown")
if reactor.__class__.__name__ == "AsyncioSelectorReactor":
self.assertIsNone(reactor.threadpool)
# ReactorBase.__init__ sets self.crash as a 'shutdown'
# event, which in turn calls stop on the underlying
# asyncio event loop, which in turn sets a _stopping
# attribute on it that's only unset after an iteration of
# the loop. Subsequent tests can only reuse the asyncio
# loop if it's allowed to run and unset that _stopping
# attribute.
self.runReactor(reactor)
else:
gc.collect()
self.assertIsNone(threadPoolRef())
def test_isInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{True} if it is
called in the thread the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.stop()
reactor.callWhenRunning(check)
self.runReactor(reactor)
self.assertEqual([True], results)
def test_isNotInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{False} if it is
called in a different thread than the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.callFromThread(reactor.stop)
reactor.callInThread(check)
self.runReactor(reactor)
self.assertEqual([False], results)
globals().update(ThreadTestsBuilder.makeTestCaseClasses())
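# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the thread APIs exercised above: run blocking
# work on the reactor threadpool, then hop back to the reactor thread with
# the result.
def _exampleThreadedCall(reactor, blockingFunction, onResult):
    def worker():
        result = blockingFunction()
        # callFromThread is the only reactor call safe from a worker thread.
        reactor.callFromThread(onResult, result)
    reactor.callInThread(worker)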

View file

@ -0,0 +1,112 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
from twisted.python.log import msg
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.interfaces import IReactorTime, IReactorThreads
class TimeTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorTime}.
"""
requiredInterfaces = (IReactorTime,)
def test_delayedCallStopsReactor(self):
"""
The reactor can be stopped by a delayed call.
"""
reactor = self.buildReactor()
reactor.callLater(0, reactor.stop)
reactor.run()
def test_distantDelayedCall(self):
"""
Scheduling a delayed call at a point in the extreme future does not
prevent normal reactor operation.
"""
reactor = self.buildReactor()
if IReactorThreads.providedBy(reactor):
def eventSource(reactor, event):
msg(format="Thread-based event-source scheduling %(event)r",
event=event)
reactor.callFromThread(event)
else:
raise SkipTest("Do not know how to synthesize non-time event to "
"stop the test")
# Pick a pretty big delay.
delayedCall = reactor.callLater(2 ** 128 + 1, lambda: None)
def stop():
msg("Stopping the reactor")
reactor.stop()
# Use repeated invocation of the event source to set up the call to stop
# the reactor. This makes it more likely at least one normal iteration
# will take place with the delayed call in place before the slightly
# different reactor shutdown logic alters things.
eventSource(reactor, lambda: eventSource(reactor, stop))
# Run the reactor directly, without a timeout. A timeout would
# interfere with the purpose of this test, which is to have the timeout
# passed to the reactor's doIterate implementation (potentially) be
# very, very large. Hopefully the event source defined above will work
# and cause the reactor to stop.
reactor.run()
# The reactor almost surely stopped before the delayed call
# fired... right?
self.assertTrue(delayedCall.active())
self.assertIn(delayedCall, reactor.getDelayedCalls())
class GlibTimeTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorTime} for reactors based
off glib.
"""
requiredInterfaces = (IReactorTime,)
if platform.isWindows():
_reactors = ["twisted.internet.gtk2reactor.PortableGtkReactor"]
else:
_reactors = ["twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor"]
def test_timeout_add(self):
"""
A L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
call scheduled from a C{gobject.timeout_add} call is run on time.
"""
import gobject
reactor = self.buildReactor()
result = []
def gschedule():
reactor.callLater(0, callback)
return 0
def callback():
result.append(True)
reactor.stop()
reactor.callWhenRunning(gobject.timeout_add, 10, gschedule)
self.runReactor(reactor, 5)
self.assertEqual(result, [True])
globals().update(TimeTestsBuilder.makeTestCaseClasses())
globals().update(GlibTimeTestsBuilder.makeTestCaseClasses())
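# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of testing callLater-based scheduling
# deterministically with twisted.internet.task.Clock instead of a reactor.
def _exampleClockScheduling():
    from twisted.internet.task import Clock
    clock = Clock()
    fired = []
    call = clock.callLater(5, fired.append, "done")
    clock.advance(4.9)   # not yet: the call is still pending
    assert fired == [] and call.active()
    clock.advance(0.2)   # past the five second mark, the call runs
    return fired         # ["done"]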

View file

@ -0,0 +1,386 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{ITLSTransport}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
from zope.interface import implementer
from twisted.python.compat import networkString
from twisted.python.filepath import FilePath
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.internet.interfaces import (
IReactorSSL, ITLSTransport, IStreamClientEndpoint)
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.endpoints import (
SSL4ServerEndpoint, SSL4ClientEndpoint, TCP4ClientEndpoint)
from twisted.internet.error import ConnectionClosed
from twisted.internet.task import Cooperator
from twisted.trial.unittest import SkipTest
from twisted.python.runtime import platform
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
from twisted.internet.test.test_tcp import (
ConnectToTCPListenerMixin, StreamTransportTestsMixin, AbortConnectionMixin,
)
from twisted.internet.test.connectionmixins import (
EndpointCreator, ConnectionTestsMixin, BrokenContextFactory)
try:
from OpenSSL.crypto import FILETYPE_PEM
except ImportError:
FILETYPE_PEM = None
else:
from twisted.internet.ssl import PrivateCertificate, KeyPair
from twisted.internet.ssl import ClientContextFactory
class TLSMixin:
requiredInterfaces = [IReactorSSL]
if platform.isWindows():
msg = (
"For some reason, these reactors don't deal with SSL "
"disconnection correctly on Windows. See #3371.")
skippedReactors = {
"twisted.internet.glib2reactor.Glib2Reactor": msg,
"twisted.internet.gtk2reactor.Gtk2Reactor": msg}
class ContextGeneratingMixin(object):
import twisted
_pem = FilePath(
networkString(twisted.__file__)).sibling(b"test").child(b"server.pem")
del twisted
def getServerContext(self):
"""
Return a new SSL context suitable for use in a test server.
"""
pem = self._pem.getContent()
cert = PrivateCertificate.load(
pem, KeyPair.load(pem, FILETYPE_PEM), FILETYPE_PEM)
return cert.options()
def getClientContext(self):
return ClientContextFactory()
@implementer(IStreamClientEndpoint)
class StartTLSClientEndpoint(object):
"""
An endpoint which wraps another one and adds a TLS layer immediately when
connections are set up.
@ivar wrapped: A L{IStreamClientEndpoint} provider which will be used to
really set up connections.
@ivar contextFactory: A L{ContextFactory} to use to do TLS.
"""
def __init__(self, wrapped, contextFactory):
self.wrapped = wrapped
self.contextFactory = contextFactory
def connect(self, factory):
"""
Establish a connection using a protocol build by C{factory} and
immediately start TLS on it. Return a L{Deferred} which fires with the
protocol instance.
"""
# This would be cleaner when we have ITransport.switchProtocol, which
# will be added with ticket #3204:
class WrapperFactory(ServerFactory):
def buildProtocol(wrapperSelf, addr):
protocol = factory.buildProtocol(addr)
def connectionMade(orig=protocol.connectionMade):
protocol.transport.startTLS(self.contextFactory)
orig()
protocol.connectionMade = connectionMade
return protocol
return self.wrapped.connect(WrapperFactory())
class StartTLSClientCreator(EndpointCreator, ContextGeneratingMixin):
"""
Create L{ITLSTransport.startTLS} endpoint for the client, and normal SSL
for server just because it's easier.
"""
def server(self, reactor):
"""
Construct an SSL server endpoint. This should be constructing a TCP
server endpoint which immediately calls C{startTLS} instead, but that
is hard.
"""
return SSL4ServerEndpoint(reactor, 0, self.getServerContext())
def client(self, reactor, serverAddress):
"""
Construct a TCP client endpoint wrapped to immediately start TLS.
"""
return StartTLSClientEndpoint(
TCP4ClientEndpoint(
reactor, '127.0.0.1', serverAddress.port),
ClientContextFactory())
class BadContextTestsMixin(object):
"""
Mixin for L{ReactorBuilder} subclasses which defines a helper for testing
the handling of broken context factories.
"""
def _testBadContext(self, useIt):
"""
Assert that the exception raised by a broken context factory's
C{getContext} method is raised by some reactor method. If it is not, an
exception will be raised to fail the test.
@param useIt: A two-argument callable which will be called with a
reactor and a broken context factory and which is expected to raise
the same exception as the broken context factory's C{getContext}
method.
"""
reactor = self.buildReactor()
exc = self.assertRaises(
ValueError, useIt, reactor, BrokenContextFactory())
self.assertEqual(BrokenContextFactory.message, str(exc))
class StartTLSClientTestsMixin(TLSMixin, ReactorBuilder, ConnectionTestsMixin):
"""
Tests for TLS connections established using L{ITLSTransport.startTLS} (as
opposed to L{IReactorSSL.connectSSL} or L{IReactorSSL.listenSSL}).
"""
endpoints = StartTLSClientCreator()
class SSLCreator(EndpointCreator, ContextGeneratingMixin):
"""
Create SSL endpoints.
"""
def server(self, reactor):
"""
Create an SSL server endpoint on a TCP/IP-stack allocated port.
"""
return SSL4ServerEndpoint(reactor, 0, self.getServerContext())
def client(self, reactor, serverAddress):
"""
Create an SSL client endpoint which will connect localhost on
the port given by C{serverAddress}.
@type serverAddress: L{IPv4Address}
"""
return SSL4ClientEndpoint(
reactor, '127.0.0.1', serverAddress.port,
ClientContextFactory())
class SSLClientTestsMixin(TLSMixin, ReactorBuilder, ContextGeneratingMixin,
ConnectionTestsMixin, BadContextTestsMixin):
"""
Mixin defining tests relating to L{ITLSTransport}.
"""
endpoints = SSLCreator()
def test_badContext(self):
"""
If the context factory passed to L{IReactorSSL.connectSSL} raises an
exception from its C{getContext} method, that exception is raised by
L{IReactorSSL.connectSSL}.
"""
def useIt(reactor, contextFactory):
return reactor.connectSSL(
"127.0.0.1", 1234, ClientFactory(), contextFactory)
self._testBadContext(useIt)
def test_disconnectAfterWriteAfterStartTLS(self):
"""
L{ITCPTransport.loseConnection} ends a connection which was set up with
L{ITLSTransport.startTLS} and which has recently been written to. This
is intended to verify that a socket send error masked by the TLS
implementation doesn't prevent the connection from being reported as
closed.
"""
class ShortProtocol(Protocol):
def connectionMade(self):
if not ITLSTransport.providedBy(self.transport):
# Functionality isn't available to be tested.
finished = self.factory.finished
self.factory.finished = None
finished.errback(SkipTest("No ITLSTransport support"))
return
# Switch the transport to TLS.
self.transport.startTLS(self.factory.context)
# Force TLS to really get negotiated. If nobody talks, nothing
# will happen.
self.transport.write(b"x")
def dataReceived(self, data):
# Stuff some bytes into the socket. This mostly has the effect
# of causing the next write to fail with ENOTCONN or EPIPE.
# With the pyOpenSSL implementation of ITLSTransport, the error
# is swallowed outside of the control of Twisted.
self.transport.write(b"y")
# Now close the connection, which requires a TLS close alert to
# be sent.
self.transport.loseConnection()
def connectionLost(self, reason):
# This is the success case. The client and the server want to
# get here.
finished = self.factory.finished
if finished is not None:
self.factory.finished = None
finished.callback(reason)
reactor = self.buildReactor()
serverFactory = ServerFactory()
serverFactory.finished = Deferred()
serverFactory.protocol = ShortProtocol
serverFactory.context = self.getServerContext()
clientFactory = ClientFactory()
clientFactory.finished = Deferred()
clientFactory.protocol = ShortProtocol
clientFactory.context = self.getClientContext()
clientFactory.context.method = serverFactory.context.method
lostConnectionResults = []
finished = DeferredList(
[serverFactory.finished, clientFactory.finished],
consumeErrors=True)
def cbFinished(results):
lostConnectionResults.extend([results[0][1], results[1][1]])
finished.addCallback(cbFinished)
port = reactor.listenTCP(0, serverFactory, interface='127.0.0.1')
self.addCleanup(port.stopListening)
connector = reactor.connectTCP(
port.getHost().host, port.getHost().port, clientFactory)
self.addCleanup(connector.disconnect)
finished.addCallback(lambda ign: reactor.stop())
self.runReactor(reactor)
lostConnectionResults[0].trap(ConnectionClosed)
lostConnectionResults[1].trap(ConnectionClosed)
class TLSPortTestsBuilder(TLSMixin, ContextGeneratingMixin,
ObjectModelIntegrationMixin, BadContextTestsMixin,
ConnectToTCPListenerMixin,
StreamTransportTestsMixin, ReactorBuilder):
"""
Tests for L{IReactorSSL.listenSSL}
"""
def getListeningPort(self, reactor, factory):
"""
Get a TLS port from a reactor.
"""
return reactor.listenSSL(0, factory, self.getServerContext())
def getExpectedStartListeningLogMessage(self, port, factory):
"""
Get the message expected to be logged when a TLS port starts listening.
"""
return "%s (TLS) starting on %d" % (factory, port.getHost().port)
def getExpectedConnectionLostLogMsg(self, port):
"""
Get the expected connection lost message for a TLS port.
"""
return "(TLS Port %s Closed)" % (port.getHost().port,)
def test_badContext(self):
"""
If the context factory passed to L{IReactorSSL.listenSSL} raises an
exception from its C{getContext} method, that exception is raised by
L{IReactorSSL.listenSSL}.
"""
def useIt(reactor, contextFactory):
return reactor.listenSSL(0, ServerFactory(), contextFactory)
self._testBadContext(useIt)
def connectToListener(self, reactor, address, factory):
"""
Connect to the given listening TLS port, assuming the
underlying transport is TCP.
@param reactor: The reactor under test.
@type reactor: L{IReactorSSL}
@param address: The listening's address. Only the C{port}
component is used; see
L{ConnectToTCPListenerMixin.LISTENER_HOST}.
@type address: L{IPv4Address} or L{IPv6Address}
@param factory: The client factory.
@type factory: L{ClientFactory}
@return: The connector
"""
return reactor.connectSSL(
self.LISTENER_HOST,
address.port,
factory,
self.getClientContext(),
)
globals().update(SSLClientTestsMixin.makeTestCaseClasses())
globals().update(StartTLSClientTestsMixin.makeTestCaseClasses())
globals().update(TLSPortTestsBuilder().makeTestCaseClasses())
class AbortSSLConnectionTests(ReactorBuilder, AbortConnectionMixin, ContextGeneratingMixin):
"""
C{abortConnection} tests using SSL.
"""
requiredInterfaces = (IReactorSSL,)
endpoints = SSLCreator()
def buildReactor(self):
reactor = ReactorBuilder.buildReactor(self)
from twisted.internet import _producer_helpers
# Patch twisted.protocols.tls to use this reactor, until we get
# around to fixing #5206, or the TLS code uses an explicit reactor:
cooperator = Cooperator(
scheduler=lambda x: reactor.callLater(0.00001, x))
self.patch(_producer_helpers, "cooperate", cooperator.cooperate)
return reactor
def setUp(self):
if FILETYPE_PEM is None:
raise SkipTest("OpenSSL not available.")
globals().update(AbortSSLConnectionTests.makeTestCaseClasses())
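# --- Illustrative sketch added by the editor; not part of the original test
# module.  A hedged example of the ITLSTransport.startTLS pattern used above:
# a protocol upgrades its own transport to TLS as soon as the connection is
# made.  The contextFactory attribute on the factory is an assumption of this
# sketch, not something the module defines.
class _ExampleStartTLSProtocol(Protocol):
    def connectionMade(self):
        if ITLSTransport.providedBy(self.transport):
            # Everything written after startTLS() is encrypted.
            self.transport.startTLS(self.factory.contextFactory)
            self.transport.write(b"hello over TLS")
        else:
            self.transport.loseConnection()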

View file

@ -0,0 +1,515 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorUDP} and the UDP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import context
from twisted.python.log import ILogContext, err
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.interfaces import (
ILoggingContext, IListeningPort, IReactorUDP, IReactorSocket)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.test.connectionmixins import (LogObserverMixin,
findFreePort)
from twisted.internet import defer, error
from twisted.test.test_udp import Server, GoodClient
from twisted.trial.unittest import SkipTest
def _has_ipv6():
""" Returns True if the system can bind an IPv6 address."""
sock = None
has_ipv6 = False
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(("::1", 0))
has_ipv6 = True
except socket.error:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6()
def skipWithoutIPv6(f):
"""
Skip the decorated test if the running system lacks IPv6 support.
"""
if not HAS_IPV6:
f.skip = "Does not work on systems without IPv6 support."
return f
class DatagramTransportTestsMixin(LogObserverMixin):
"""
Mixin defining tests which apply to any port/datagram based transport.
"""
def test_startedListeningLogMessage(self):
"""
When a port starts, a message including a description of the associated
protocol is logged.
"""
loggedMessages = self.observe()
reactor = self.buildReactor()
@implementer(ILoggingContext)
class SomeProtocol(DatagramProtocol):
def logPrefix(self):
return "Crazy Protocol"
protocol = SomeProtocol()
p = self.getListeningPort(reactor, protocol)
expectedMessage = "Crazy Protocol starting on %d" % (p.getHost().port,)
self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
def test_connectionLostLogMessage(self):
"""
When a connection is lost, a message is logged containing an
address identifying the port and the fact that it was closed.
"""
loggedMessages = self.observe()
reactor = self.buildReactor()
p = self.getListeningPort(reactor, DatagramProtocol())
expectedMessage = "(UDP Port %s Closed)" % (p.getHost().port,)
def stopReactor(ignored):
reactor.stop()
def doStopListening():
del loggedMessages[:]
maybeDeferred(p.stopListening).addCallback(stopReactor)
reactor.callWhenRunning(doStopListening)
self.runReactor(reactor)
self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
def test_stopProtocolScheduling(self):
"""
L{DatagramProtocol.stopProtocol} is called asynchronously (i.e., not
re-entrantly) when C{stopListening} is used to stop the datagram
transport.
"""
class DisconnectingProtocol(DatagramProtocol):
started = False
stopped = False
inStartProtocol = False
stoppedInStart = False
def startProtocol(self):
self.started = True
self.inStartProtocol = True
self.transport.stopListening()
self.inStartProtocol = False
def stopProtocol(self):
self.stopped = True
self.stoppedInStart = self.inStartProtocol
reactor.stop()
reactor = self.buildReactor()
protocol = DisconnectingProtocol()
self.getListeningPort(reactor, protocol)
self.runReactor(reactor)
self.assertTrue(protocol.started)
self.assertTrue(protocol.stopped)
self.assertFalse(protocol.stoppedInStart)
class UDPPortTestsMixin(object):
"""
Tests for L{IReactorUDP.listenUDP} and
L{IReactorSocket.adoptDatagramPort}.
"""
def test_interface(self):
"""
L{IReactorUDP.listenUDP} returns an object providing L{IListeningPort}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertTrue(verifyObject(IListeningPort, port))
def test_getHost(self):
"""
L{IListeningPort.getHost} returns an L{IPv4Address} giving a
dotted-quad of the IPv4 address the port is listening on as well as
the port number.
"""
host, portNumber = findFreePort(type=socket.SOCK_DGRAM)
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), port=portNumber, interface=host)
self.assertEqual(
port.getHost(), IPv4Address('UDP', host, portNumber))
@skipWithoutIPv6
def test_getHostIPv6(self):
"""
L{IListeningPort.getHost} returns an L{IPv6Address} when listening on
an IPv6 interface.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface='::1')
addr = port.getHost()
self.assertEqual(addr.host, "::1")
self.assertIsInstance(addr, IPv6Address)
def test_invalidInterface(self):
"""
An L{InvalidAddressError} is raised when trying to listen on an address
that isn't a valid IPv4 or IPv6 address.
"""
reactor = self.buildReactor()
self.assertRaises(
error.InvalidAddressError, reactor.listenUDP, DatagramProtocol(),
0, interface='example.com')
def test_logPrefix(self):
"""
Datagram transports implement L{ILoggingContext.logPrefix} to return a
message reflecting the protocol they are running.
"""
class CustomLogPrefixDatagramProtocol(DatagramProtocol):
def __init__(self, prefix):
self._prefix = prefix
self.system = Deferred()
def logPrefix(self):
return self._prefix
def datagramReceived(self, bytes, addr):
if self.system is not None:
system = self.system
self.system = None
system.callback(context.get(ILogContext)["system"])
reactor = self.buildReactor()
protocol = CustomLogPrefixDatagramProtocol("Custom Datagrams")
d = protocol.system
port = self.getListeningPort(reactor, protocol)
address = port.getHost()
def gotSystem(system):
self.assertEqual("Custom Datagrams (UDP)", system)
d.addCallback(gotSystem)
d.addErrback(err)
d.addCallback(lambda ignored: reactor.stop())
port.write(b"some bytes", ('127.0.0.1', address.port))
self.runReactor(reactor)
def test_writeSequence(self):
"""
Write a sequence of L{bytes} to a L{DatagramProtocol}.
"""
class SimpleDatagramProtocol(DatagramProtocol):
def __init__(self):
self.defer = Deferred()
def datagramReceived(self, data, addr):
self.defer.callback(data)
reactor = self.buildReactor()
protocol = SimpleDatagramProtocol()
defer = protocol.defer
port = self.getListeningPort(reactor, protocol)
address = port.getHost()
dataToWrite = (b"some", b"bytes", b"to", b"write")
def gotData(data):
self.assertEqual(b"".join(dataToWrite), data)
defer.addCallback(gotData)
defer.addErrback(err)
defer.addCallback(lambda ignored: reactor.stop())
port.writeSequence(dataToWrite, ('127.0.0.1', address.port))
self.runReactor(reactor)
def test_str(self):
"""
C{str()} on the listening port object includes the port number.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertIn(str(port.getHost().port), str(port))
def test_repr(self):
"""
C{repr()} on the listening port object includes the port number.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertIn(repr(port.getHost().port), repr(port))
@skipWithoutIPv6
def test_writeToIPv6Interface(self):
"""
Writing to an IPv6 UDP socket on the loopback interface succeeds.
"""
reactor = self.buildReactor()
server = Server()
serverStarted = server.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, server, interface="::1")
client = GoodClient()
clientStarted = client.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, client, interface="::1")
cAddr = client.transport.getHost()
def cbClientStarted(ignored):
"""
Send a datagram from the client once it's started.
@param ignored: a list of C{[None, None]}, which is ignored
@returns: a deferred which fires when the server has received a
datagram.
"""
client.transport.write(
b"spam", ("::1", server.transport.getHost().port))
serverReceived = server.packetReceived = defer.Deferred()
return serverReceived
def cbServerReceived(ignored):
"""
Stop the reactor after a datagram is received.
@param ignored: L{None}, which is ignored
@returns: L{None}
"""
reactor.stop()
d = defer.gatherResults([serverStarted, clientStarted])
d.addCallback(cbClientStarted)
d.addCallback(cbServerReceived)
d.addErrback(err)
self.runReactor(reactor)
packet = server.packets[0]
self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))
@skipWithoutIPv6
def test_connectedWriteToIPv6Interface(self):
"""
An IPv6 address can be passed as the C{interface} argument to
L{listenUDP}. The resulting Port accepts IPv6 datagrams.
"""
reactor = self.buildReactor()
server = Server()
serverStarted = server.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, server, interface="::1")
client = GoodClient()
clientStarted = client.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, client, interface="::1")
cAddr = client.transport.getHost()
def cbClientStarted(ignored):
"""
Send a datagram from the client once it's started.
@param ignored: a list of C{[None, None]}, which is ignored
@returns: a deferred which fires when the server has received a
datagram.
"""
client.transport.connect("::1", server.transport.getHost().port)
client.transport.write(b"spam")
serverReceived = server.packetReceived = defer.Deferred()
return serverReceived
def cbServerReceived(ignored):
"""
Stop the reactor after a datagram is received.
@param ignored: L{None}, which is ignored
@returns: L{None}
"""
reactor.stop()
d = defer.gatherResults([serverStarted, clientStarted])
d.addCallback(cbClientStarted)
d.addCallback(cbServerReceived)
d.addErrback(err)
self.runReactor(reactor)
packet = server.packets[0]
self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))
def test_writingToHostnameRaisesInvalidAddressError(self):
"""
Writing to a hostname instead of an IP address will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertRaises(
error.InvalidAddressError,
port.write, 'spam', ('example.invalid', 1))
@skipWithoutIPv6
def test_writingToIPv6OnIPv4RaisesInvalidAddressError(self):
"""
Writing to an IPv6 address on an IPv4 socket will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface="127.0.0.1")
self.assertRaises(
error.InvalidAddressError, port.write, 'spam', ('::1', 1))
@skipWithoutIPv6
def test_writingToIPv4OnIPv6RaisesInvalidAddressError(self):
"""
Writing to an IPv4 address on an IPv6 socket will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface="::1")
self.assertRaises(
error.InvalidAddressError, port.write, 'spam', ('127.0.0.1', 1))
def test_connectingToHostnameRaisesInvalidAddressError(self):
"""
Connecting to a hostname instead of an IP address will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertRaises(
error.InvalidAddressError, port.connect, 'example.invalid', 1)
def test_allowBroadcast(self):
"""
L{IListeningPort.setBroadcastAllowed} sets broadcast to be allowed
on the socket.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
port.setBroadcastAllowed(True)
self.assertTrue(port.getBroadcastAllowed())
class UDPServerTestsBuilder(ReactorBuilder,
UDPPortTestsMixin, DatagramTransportTestsMixin):
"""
Run L{UDPPortTestsMixin} tests using newly created UDP
sockets.
"""
requiredInterfaces = (IReactorUDP,)
def getListeningPort(self, reactor, protocol, port=0, interface='',
maxPacketSize=8192):
"""
Get a UDP port from a reactor.
@param reactor: A reactor used to build the returned
L{IListeningPort} provider.
@type reactor: L{twisted.internet.interfaces.IReactorUDP}
@see: L{twisted.internet.IReactorUDP.listenUDP} for other
argument and return types.
"""
return reactor.listenUDP(port, protocol, interface=interface,
maxPacketSize=maxPacketSize)
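The getListeningPort helper above is a thin wrapper over L{IReactorUDP.listenUDP}; outside the test harness the same API is used as in this minimal sketch (the echo protocol is an illustrative assumption, not something defined in these tests).

# A minimal sketch of IReactorUDP.listenUDP; EchoDatagramProtocol is an
# illustrative name only.
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol

class EchoDatagramProtocol(DatagramProtocol):
    def datagramReceived(self, data, addr):
        # Send each datagram straight back to whoever sent it.
        self.transport.write(data, addr)

# Port 0 asks the operating system for any free port, as the tests do.
port = reactor.listenUDP(0, EchoDatagramProtocol())
print("UDP server listening on port", port.getHost().port)
# reactor.run() would start the event loop here.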
class UDPFDServerTestsBuilder(ReactorBuilder,
UDPPortTestsMixin, DatagramTransportTestsMixin):
"""
Run L{UDPPortTestsMixin} tests using adopted UDP sockets.
"""
requiredInterfaces = (IReactorSocket,)
def getListeningPort(self, reactor, protocol, port=0, interface='',
maxPacketSize=8192):
"""
Get a UDP port from a reactor, wrapping an already-initialized file
descriptor.
@param reactor: A reactor used to build the returned
L{IListeningPort} provider.
@type reactor: L{twisted.internet.interfaces.IReactorSocket}
@param port: A port number to which the adopted socket will be
bound.
@type port: C{int}
@param interface: The local IPv4 or IPv6 address to which the
adopted socket will be bound. Defaults to C{''}, i.e. all IPv4
addresses.
@type interface: C{str}
@see: L{twisted.internet.IReactorSocket.adoptDatagramPort} for other
argument and return types.
"""
if IReactorSocket.providedBy(reactor):
if ':' in interface:
domain = socket.AF_INET6
address = socket.getaddrinfo(interface, port)[0][4]
else:
domain = socket.AF_INET
address = (interface, port)
portSock = socket.socket(domain, socket.SOCK_DGRAM)
portSock.bind(address)
portSock.setblocking(False)
try:
return reactor.adoptDatagramPort(
portSock.fileno(), portSock.family, protocol,
maxPacketSize)
finally:
# The socket should still be open; fileno will raise if it is
# not.
portSock.fileno()
# Now clean it up, because the rest of the test does not need
# it.
portSock.close()
else:
raise SkipTest("Reactor does not provide IReactorSocket")
globals().update(UDPServerTestsBuilder.makeTestCaseClasses())
globals().update(UDPFDServerTestsBuilder.makeTestCaseClasses())
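The adopted-socket path in UDPFDServerTestsBuilder.getListeningPort above amounts to a simple recipe: bind a non-blocking datagram socket yourself, hand its file descriptor to the reactor, then close your copy. A sketch, assuming the installed reactor provides L{IReactorSocket}:

# A sketch of IReactorSocket.adoptDatagramPort, assuming the installed
# reactor provides IReactorSocket (the default POSIX reactors do).
import socket

from twisted.internet import reactor
from twisted.internet.interfaces import IReactorSocket
from twisted.internet.protocol import DatagramProtocol

if IReactorSocket.providedBy(reactor):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    sock.setblocking(False)
    # The reactor duplicates the file descriptor, so the original socket
    # object can be closed once the port has been adopted.
    port = reactor.adoptDatagramPort(
        sock.fileno(), socket.AF_INET, DatagramProtocol())
    sock.close()
    print("adopted UDP port", port.getHost().port)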

View file

@@ -0,0 +1,165 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the internal implementation details of L{twisted.internet.udp}.
"""
from __future__ import division, absolute_import
import socket
from twisted.trial import unittest
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import udp
from twisted.python.runtime import platformType
if platformType == 'win32':
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
else:
from errno import EWOULDBLOCK
class StringUDPSocket(object):
"""
A fake UDP socket object, which returns a fixed sequence of strings and/or
socket errors. Useful for testing.
@ivar retvals: A C{list} containing either strings or C{socket.error}s.
@ivar connectedAddr: The address the socket is connected to.
"""
def __init__(self, retvals):
self.retvals = retvals
self.connectedAddr = None
def connect(self, addr):
self.connectedAddr = addr
def recvfrom(self, size):
"""
Return (or raise) the next value from C{self.retvals}.
"""
ret = self.retvals.pop(0)
if isinstance(ret, socket.error):
raise ret
return ret, None
class KeepReads(DatagramProtocol):
"""
Accumulate reads in a list.
"""
def __init__(self):
self.reads = []
def datagramReceived(self, data, addr):
self.reads.append(data)
class ErrorsTests(unittest.SynchronousTestCase):
"""
Error handling tests for C{udp.Port}.
"""
def test_socketReadNormal(self):
"""
Socket reads that return some good data followed by an ignorable socket
error cause reading to stop, and no log messages are logged.
"""
# Add a fake error to the list of ignorables:
udp._sockErrReadIgnore.append(-7000)
self.addCleanup(udp._sockErrReadIgnore.remove, -7000)
protocol = KeepReads()
port = udp.Port(None, protocol)
# Normal result, no errors
port.socket = StringUDPSocket(
[b"result", b"123", socket.error(-7000), b"456",
socket.error(-7000)])
port.doRead()
# Read stops on error:
self.assertEqual(protocol.reads, [b"result", b"123"])
port.doRead()
self.assertEqual(protocol.reads, [b"result", b"123", b"456"])
def test_readImmediateError(self):
"""
If the socket is unconnected, socket reads with an immediate
connection refusal are ignored, and reading stops. The protocol's
C{connectionRefused} method is not called.
"""
# Add a fake error to the list of those that count as connection
# refused:
udp._sockErrReadRefuse.append(-6000)
self.addCleanup(udp._sockErrReadRefuse.remove, -6000)
protocol = KeepReads()
# Fail if connectionRefused is called:
protocol.connectionRefused = lambda: 1/0
port = udp.Port(None, protocol)
# Try an immediate "connection refused"
port.socket = StringUDPSocket([b"a", socket.error(-6000), b"b",
socket.error(EWOULDBLOCK)])
port.doRead()
# Read stops on error:
self.assertEqual(protocol.reads, [b"a"])
# Read again:
port.doRead()
self.assertEqual(protocol.reads, [b"a", b"b"])
def test_connectedReadImmediateError(self):
"""
If the socket is connected, socket reads with an immediate
connection refusal are ignored, and reading stops. The protocol's
C{connectionRefused} method is called.
"""
# Add a fake error to the list of those that count as connection
# refused:
udp._sockErrReadRefuse.append(-6000)
self.addCleanup(udp._sockErrReadRefuse.remove, -6000)
protocol = KeepReads()
refused = []
protocol.connectionRefused = lambda: refused.append(True)
port = udp.Port(None, protocol)
port.socket = StringUDPSocket([b"a", socket.error(-6000), b"b",
socket.error(EWOULDBLOCK)])
port.connect("127.0.0.1", 9999)
# Read stops on error:
port.doRead()
self.assertEqual(protocol.reads, [b"a"])
self.assertEqual(refused, [True])
# Read again:
port.doRead()
self.assertEqual(protocol.reads, [b"a", b"b"])
self.assertEqual(refused, [True])
def test_readUnknownError(self):
"""
Socket reads that fail with an unknown socket error re-raise that error.
"""
protocol = KeepReads()
port = udp.Port(None, protocol)
# Some good data, followed by an unknown error
port.socket = StringUDPSocket([b"good", socket.error(-1337)])
self.assertRaises(socket.error, port.doRead)
self.assertEqual(protocol.reads, [b"good"])
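The pattern above generalises: C{doRead} keeps calling C{recvfrom} until it sees an error it recognises, so any scripted sequence of payloads and C{socket.error}s can drive it. A sketch, assuming the StringUDPSocket and KeepReads classes defined above are in scope and running on POSIX:

# A sketch reusing the fakes above; on POSIX, EWOULDBLOCK is in
# udp._sockErrReadIgnore, so doRead stops reading cleanly when it sees it.
import socket
from errno import EWOULDBLOCK

from twisted.internet import udp

protocol = KeepReads()
port = udp.Port(None, protocol)
port.socket = StringUDPSocket([b"one", b"two", socket.error(EWOULDBLOCK)])
port.doRead()
assert protocol.reads == [b"one", b"two"]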

File diff suppressed because it is too large

View file

@@ -0,0 +1,199 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorWin32Events}.
"""
try:
import win32event
except ImportError:
win32event = None
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.python.threadable import isInIOThread
from twisted.internet.interfaces import IReactorWin32Events
from twisted.internet.defer import Deferred
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.threadable import getThreadID
class Listener(object):
"""
L{Listener} is an object that can be added to a L{IReactorWin32Events}
reactor to receive callback notification when a Windows event is set. It
records what thread its callback is invoked in and fires a Deferred.
@ivar success: A flag which is set to C{True} when the event callback is
called.
@ivar logThreadID: The id of the thread in which the C{logPrefix} method is
called.
@ivar eventThreadID: The id of the thread in which the event callback is
called.
@ivar connLostThreadID: The id of the thread in which the C{connectionLost}
method is called.
@ivar _finished: The L{Deferred} which will be fired when the event callback
is called.
"""
success = False
logThreadID = eventThreadID = connLostThreadID = None
def __init__(self, finished):
self._finished = finished
def logPrefix(self):
self.logThreadID = getThreadID()
return 'Listener'
def occurred(self):
self.success = True
self.eventThreadID = getThreadID()
self._finished.callback(None)
def brokenOccurred(self):
raise RuntimeError("Some problem")
def returnValueOccurred(self):
return EnvironmentError("Entirely different problem")
def connectionLost(self, reason):
self.connLostThreadID = getThreadID()
self._finished.errback(reason)
class Win32EventsTestsBuilder(ReactorBuilder):
"""
Builder defining tests relating to L{IReactorWin32Events}.
"""
requiredInterfaces = [IReactorWin32Events]
def test_interface(self):
"""
An instance of the reactor has all of the methods defined on
L{IReactorWin32Events}.
"""
reactor = self.buildReactor()
verifyObject(IReactorWin32Events, reactor)
def test_addEvent(self):
"""
When an event which has been added to the reactor is set, the action
associated with the event is invoked in the reactor thread.
"""
reactorThreadID = getThreadID()
reactor = self.buildReactor()
event = win32event.CreateEvent(None, False, False, None)
finished = Deferred()
finished.addCallback(lambda ignored: reactor.stop())
listener = Listener(finished)
reactor.addEvent(event, listener, 'occurred')
reactor.callWhenRunning(win32event.SetEvent, event)
self.runReactor(reactor)
self.assertTrue(listener.success)
self.assertEqual(reactorThreadID, listener.logThreadID)
self.assertEqual(reactorThreadID, listener.eventThreadID)
def test_ioThreadDoesNotChange(self):
"""
Using L{IReactorWin32Events.addEvent} does not change which thread is
reported as the I/O thread.
"""
results = []
def check(ignored):
results.append(isInIOThread())
reactor.stop()
reactor = self.buildReactor()
event = win32event.CreateEvent(None, False, False, None)
finished = Deferred()
listener = Listener(finished)
finished.addCallback(check)
reactor.addEvent(event, listener, 'occurred')
reactor.callWhenRunning(win32event.SetEvent, event)
self.runReactor(reactor)
self.assertTrue(listener.success)
self.assertEqual([True], results)
def test_disconnectedOnError(self):
"""
If the event handler raises an exception, the event is removed from the
reactor and the handler's C{connectionLost} method is called in the I/O
thread and the exception is logged.
"""
reactorThreadID = getThreadID()
reactor = self.buildReactor()
event = win32event.CreateEvent(None, False, False, None)
result = []
finished = Deferred()
finished.addBoth(result.append)
finished.addBoth(lambda ignored: reactor.stop())
listener = Listener(finished)
reactor.addEvent(event, listener, 'brokenOccurred')
reactor.callWhenRunning(win32event.SetEvent, event)
self.runReactor(reactor)
self.assertIsInstance(result[0], Failure)
result[0].trap(RuntimeError)
self.assertEqual(reactorThreadID, listener.connLostThreadID)
self.assertEqual(1, len(self.flushLoggedErrors(RuntimeError)))
def test_disconnectOnReturnValue(self):
"""
If the event handler returns a value, the event is removed from the
reactor and the handler's C{connectionLost} method is called in the I/O
thread.
"""
reactorThreadID = getThreadID()
reactor = self.buildReactor()
event = win32event.CreateEvent(None, False, False, None)
result = []
finished = Deferred()
finished.addBoth(result.append)
finished.addBoth(lambda ignored: reactor.stop())
listener = Listener(finished)
reactor.addEvent(event, listener, 'returnValueOccurred')
reactor.callWhenRunning(win32event.SetEvent, event)
self.runReactor(reactor)
self.assertIsInstance(result[0], Failure)
result[0].trap(EnvironmentError)
self.assertEqual(reactorThreadID, listener.connLostThreadID)
def test_notDisconnectedOnShutdown(self):
"""
Event handlers added with L{IReactorWin32Events.addEvent} do not have
C{connectionLost} called on them if they are still active when the
reactor shuts down.
"""
reactor = self.buildReactor()
event = win32event.CreateEvent(None, False, False, None)
finished = Deferred()
listener = Listener(finished)
reactor.addEvent(event, listener, 'occurred')
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
self.assertIsNone(listener.connLostThreadID)
globals().update(Win32EventsTestsBuilder.makeTestCaseClasses())
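For reference, the API exercised above associates a Windows event handle with a named action on a listener object; a minimal Windows-only sketch, assuming pywin32 is installed and the running reactor provides L{IReactorWin32Events}:

# A Windows-only sketch, assuming pywin32 is installed and the installed
# reactor (e.g. win32eventreactor) provides IReactorWin32Events.
import win32event

from twisted.internet import reactor

class PrintingListener(object):
    def logPrefix(self):
        # Used by the reactor for log messages about this listener.
        return "PrintingListener"

    def occurred(self):
        # Called in the reactor thread when the event handle is signalled.
        print("event was signalled")
        reactor.stop()

    def connectionLost(self, reason):
        print("listener removed from the reactor:", reason)

event = win32event.CreateEvent(None, False, False, None)
reactor.addEvent(event, PrintingListener(), "occurred")
reactor.callWhenRunning(win32event.SetEvent, event)
reactor.run()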

View file

@@ -0,0 +1,175 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.serialport}.
"""
import os
import shutil
import tempfile
from twisted.trial import unittest
from twisted.internet.protocol import Protocol
from twisted.python.failure import Failure
from twisted.python.runtime import platform
from twisted.internet.test.test_serialport import DoNothing
testingForced = 'TWISTED_FORCE_SERIAL_TESTS' in os.environ
try:
from twisted.internet import serialport
import serial
except ImportError:
if testingForced:
raise
serialport = None
serial = None
if serialport is not None:
class RegularFileSerial(serial.Serial):
def __init__(self, *args, **kwargs):
super(RegularFileSerial, self).__init__(*args, **kwargs)
self.captured_args = args
self.captured_kwargs = kwargs
def _reconfigurePort(self):
pass
def _reconfigure_port(self):
pass
class RegularFileSerialPort(serialport.SerialPort):
_serialFactory = RegularFileSerial
def __init__(self, *args, **kwargs):
cbInQue = kwargs.get('cbInQue')
if 'cbInQue' in kwargs:
del kwargs['cbInQue']
self.comstat = serial.win32.COMSTAT
self.comstat.cbInQue = cbInQue
super(RegularFileSerialPort, self).__init__(*args, **kwargs)
def _clearCommError(self):
return True, self.comstat
class CollectReceivedProtocol(Protocol):
def __init__(self):
self.received_data = []
def dataReceived(self, data):
self.received_data.append(data)
class Win32SerialPortTests(unittest.TestCase):
"""
Minimal testing for Twisted's Win32 serial port support.
"""
if not testingForced:
if not platform.isWindows():
skip = "This test must run on Windows."
elif not serialport:
skip = "Windows serial port support is not available."
def setUp(self):
# Re-usable protocol and reactor
self.protocol = Protocol()
self.reactor = DoNothing()
self.directory = tempfile.mkdtemp()
self.path = os.path.join(self.directory, 'fake_serial')
data = b'1234'
with open(self.path, 'wb') as f:
f.write(data)
def tearDown(self):
shutil.rmtree(self.directory)
def test_serialPortDefaultArgs(self):
"""
Test that the correct positional and keyword arguments are
passed to the C{serial.Serial} object.
"""
port = RegularFileSerialPort(self.protocol, self.path, self.reactor)
# Validate args
self.assertEqual((self.path,), port._serial.captured_args)
# Validate kwargs
kwargs = port._serial.captured_kwargs
self.assertEqual(9600, kwargs["baudrate"])
self.assertEqual(serial.EIGHTBITS, kwargs["bytesize"])
self.assertEqual(serial.PARITY_NONE, kwargs["parity"])
self.assertEqual(serial.STOPBITS_ONE, kwargs["stopbits"])
self.assertEqual(0, kwargs["xonxoff"])
self.assertEqual(0, kwargs["rtscts"])
self.assertEqual(None, kwargs["timeout"])
port.connectionLost(Failure(Exception("Cleanup")))
def test_serialPortInitiallyConnected(self):
"""
Test that the port is connected at initialization time, and that
C{Protocol.makeConnection} has been called on the desired protocol.
"""
self.assertEqual(0, self.protocol.connected)
port = RegularFileSerialPort(self.protocol, self.path, self.reactor)
self.assertEqual(1, port.connected)
self.assertEqual(1, self.protocol.connected)
self.assertEqual(port, self.protocol.transport)
port.connectionLost(Failure(Exception("Cleanup")))
def common_exerciseHandleAccess(self, cbInQue):
port = RegularFileSerialPort(
protocol=self.protocol,
deviceNameOrPortNumber=self.path,
reactor=self.reactor,
cbInQue=cbInQue,
)
port.serialReadEvent()
port.write(b'')
port.write(b'abcd')
port.write(b'ABCD')
port.serialWriteEvent()
port.serialWriteEvent()
port.connectionLost(Failure(Exception("Cleanup")))
# No assertion since the point is simply to make sure that in all cases
# the port handle resolves instead of raising an exception.
def test_exerciseHandleAccess_1(self):
self.common_exerciseHandleAccess(cbInQue=False)
def test_exerciseHandleAccess_2(self):
self.common_exerciseHandleAccess(cbInQue=True)
def common_serialPortReturnsBytes(self, cbInQue):
protocol = CollectReceivedProtocol()
port = RegularFileSerialPort(
protocol=protocol,
deviceNameOrPortNumber=self.path,
reactor=self.reactor,
cbInQue=cbInQue,
)
port.serialReadEvent()
self.assertTrue(all(
isinstance(d, bytes) for d in protocol.received_data
))
port.connectionLost(Failure(Exception("Cleanup")))
def test_serialPortReturnsBytes_1(self):
self.common_serialPortReturnsBytes(cbInQue=False)
def test_serialPortReturnsBytes_2(self):
self.common_serialPortReturnsBytes(cbInQue=True)
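Outside the fake-file harness above, a serial port is opened by handing L{SerialPort} a protocol, a device name, and a reactor; a minimal sketch, assuming pyserial is installed (the device name 'COM3' is an illustrative guess):

# A minimal sketch of twisted.internet.serialport.SerialPort, assuming
# pyserial is installed; 'COM3' is an illustrative device name only.
from twisted.internet import reactor
from twisted.internet.protocol import Protocol
from twisted.internet.serialport import SerialPort

class PrintBytes(Protocol):
    def dataReceived(self, data):
        # Serial data arrives as bytes, as the tests above assert.
        print("received", data)

port = SerialPort(PrintBytes(), "COM3", reactor, baudrate=9600)
# reactor.run() would start the event loop here.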