Output of the new DB entries
This commit is contained in:
parent bad48e1627
commit cfbbb9ee3d
2399 changed files with 843193 additions and 43 deletions
@@ -0,0 +1,25 @@
# -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Twisted integration with operating system threads.
"""

from __future__ import absolute_import, division, print_function

from ._threadworker import ThreadWorker, LockWorker
from ._ithreads import IWorker, AlreadyQuit
from ._team import Team
from ._memory import createMemoryWorker
from ._pool import pool

__all__ = [
    "ThreadWorker",
    "LockWorker",
    "IWorker",
    "AlreadyQuit",
    "Team",
    "createMemoryWorker",
    "pool",
]
@@ -0,0 +1,46 @@
# -*- test-case-name: twisted._threads.test.test_convenience -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Common functionality used within the implementation of various workers.
"""

from __future__ import absolute_import, division, print_function

from ._ithreads import AlreadyQuit


class Quit(object):
    """
    A flag representing whether a worker has been quit.

    @ivar isSet: Whether this flag is set.
    @type isSet: L{bool}
    """

    def __init__(self):
        """
        Create a L{Quit} un-set.
        """
        self.isSet = False


    def set(self):
        """
        Set the flag if it has not been set.

        @raise AlreadyQuit: If it has been set.
        """
        self.check()
        self.isSet = True


    def check(self):
        """
        Check if the flag has been set.

        @raise AlreadyQuit: If it has been set.
        """
        if self.isSet:
            raise AlreadyQuit()
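A minimal sketch of how this flag is meant to be used (it mirrors the behaviour exercised in test_convenience.py later in this diff; the private import path is an assumption based on the file layout shown here): check() is a no-op until set() is called, after which both set() and check() raise AlreadyQuit.

from twisted._threads._convenience import Quit
from twisted._threads import AlreadyQuit

quit = Quit()
quit.check()      # does nothing while the flag is unset
quit.set()        # mark the worker as quit
try:
    quit.check()  # any later do() call on a worker would fail like this
except AlreadyQuit:
    print("worker already quit")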
@@ -0,0 +1,61 @@
# -*- test-case-name: twisted._threads.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Interfaces related to threads.
"""

from __future__ import absolute_import, division, print_function

from zope.interface import Interface


class AlreadyQuit(Exception):
    """
    This worker is dead and cannot execute more instructions.
    """



class IWorker(Interface):
    """
    A worker that can perform some work concurrently.

    All methods on this interface must be thread-safe.
    """

    def do(task):
        """
        Perform the given task.

        As an interface, this method makes no specific claims about concurrent
        execution.  An L{IWorker}'s C{do} implementation may defer execution
        for later on the same thread, immediately on a different thread, or
        some combination of the two.  It is valid for a C{do} method to
        schedule C{task} in such a way that it may never be executed.

        It is important for some implementations to provide specific
        properties with respect to where C{task} is executed, of course, and
        client code may rely on a more specific implementation of C{do} than
        L{IWorker}.

        @param task: a task to call in a thread or other concurrent context.
        @type task: 0-argument callable

        @raise AlreadyQuit: if C{quit} has been called.
        """

    def quit():
        """
        Free any resources associated with this L{IWorker} and cause it to
        reject all future work.

        @raise: L{AlreadyQuit} if this method has already been called.
        """


class IExclusiveWorker(IWorker):
    """
    Like L{IWorker}, but with the additional guarantee that the callables
    passed to C{do} will never be called concurrently with each other.
    """
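For illustration only (not part of the commit): a trivial class that satisfies the IWorker contract above by running each task immediately on the calling thread. The name ImmediateWorker is hypothetical; the Quit flag from _convenience.py supplies the AlreadyQuit behaviour the interface requires.

from zope.interface import implementer
from twisted._threads import IWorker
from twisted._threads._convenience import Quit

@implementer(IWorker)
class ImmediateWorker(object):
    """Hypothetical example: run each task right away on the calling thread."""
    def __init__(self):
        self._quit = Quit()

    def do(self, task):
        self._quit.check()  # raise AlreadyQuit after quit(), per the interface
        task()

    def quit(self):
        self._quit.set()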
venv/lib/python3.9/site-packages/twisted/_threads/_memory.py (new file, 71 lines)
@@ -0,0 +1,71 @@
# -*- test-case-name: twisted._threads.test.test_memory -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Implementation of an in-memory worker that defers execution.
"""

from __future__ import absolute_import, division, print_function

from zope.interface import implementer

from . import IWorker
from ._convenience import Quit

NoMoreWork = object()

@implementer(IWorker)
class MemoryWorker(object):
    """
    An L{IWorker} that queues work for later performance.

    @ivar _quit: a flag indicating whether this worker has been quit.
    @type _quit: L{Quit}
    """

    def __init__(self, pending=list):
        """
        Create a L{MemoryWorker}.
        """
        self._quit = Quit()
        self._pending = pending()


    def do(self, work):
        """
        Queue some work to perform later; see L{createMemoryWorker}.

        @param work: The work to perform.
        """
        self._quit.check()
        self._pending.append(work)


    def quit(self):
        """
        Quit this worker.
        """
        self._quit.set()
        self._pending.append(NoMoreWork)



def createMemoryWorker():
    """
    Create an L{IWorker} that does nothing but defer work, to be performed
    later.

    @return: a worker that will enqueue work to perform later, and a callable
        that will perform one element of that work.
    @rtype: 2-L{tuple} of (L{IWorker}, L{callable})
    """
    def perform():
        if not worker._pending:
            return False
        if worker._pending[0] is NoMoreWork:
            return False
        worker._pending.pop(0)()
        return True
    worker = MemoryWorker()
    return (worker, perform)
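A short usage sketch, mirroring the assertions in test_memory.py later in this diff: nothing handed to the worker runs until the returned performer is called, and the performer reports False once there is nothing left to do.

from twisted._threads import createMemoryWorker

worker, perform = createMemoryWorker()
done = []
worker.do(lambda: done.append(3))
worker.do(lambda: done.append(4))
print(done)       # [] - nothing has run yet
print(perform())  # True - ran the first queued task
print(perform())  # True - ran the second
print(done)       # [3, 4]
print(perform())  # False - the queue is now empty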
venv/lib/python3.9/site-packages/twisted/_threads/_pool.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# -*- test-case-name: twisted._threads.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Top level thread pool interface, used to implement
L{twisted.python.threadpool}.
"""

from __future__ import absolute_import, division, print_function

from threading import Thread, Lock, local as LocalStorage
try:
    from Queue import Queue
except ImportError:
    from queue import Queue

from twisted.python.log import err

from ._threadworker import LockWorker
from ._team import Team
from ._threadworker import ThreadWorker


def pool(currentLimit, threadFactory=Thread):
    """
    Construct a L{Team} that spawns threads as a thread pool, with the given
    limiting function.

    @note: Future maintainers: while the public API for the eventual move to
        twisted.threads should look I{something} like this, and while this
        function is necessary to implement the API described by
        L{twisted.python.threadpool}, I am starting to think the idea of a hard
        upper limit on threadpool size is just bad (turning memory performance
        issues into correctness issues well before we run into memory
        pressure), and instead we should build something with reactor
        integration for slowly releasing idle threads when they're not needed
        and I{rate} limiting the creation of new threads rather than just
        hard-capping it.

    @param currentLimit: a callable that returns the current limit on the
        number of workers that the returned L{Team} should create; if it
        already has more workers than that value, no new workers will be
        created.
    @type currentLimit: 0-argument callable returning L{int}

    @param reactor: If passed, the L{IReactorFromThreads} / L{IReactorCore} to
        be used to coordinate actions on the L{Team} itself.  Otherwise, a
        L{LockWorker} will be used.

    @return: a new L{Team}.
    """

    def startThread(target):
        return threadFactory(target=target).start()

    def limitedWorkerCreator():
        stats = team.statistics()
        if stats.busyWorkerCount + stats.idleWorkerCount >= currentLimit():
            return None
        return ThreadWorker(startThread, Queue())

    team = Team(coordinator=LockWorker(Lock(), LocalStorage()),
                createWorker=limitedWorkerCreator,
                logException=err)
    return team
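A hedged sketch of how pool() might be driven, based only on the signature and docstring above: the limit callable is consulted each time the Team considers creating a worker, so a plain lambda gives a fixed cap.

from twisted._threads import pool

team = pool(lambda: 10)                          # at most 10 worker threads
team.do(lambda: print("runs on a pool thread"))  # dispatched to a worker thread
team.quit()                                      # reject further work; workers stop once idle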
venv/lib/python3.9/site-packages/twisted/_threads/_team.py (new file, 231 lines)
@@ -0,0 +1,231 @@
# -*- test-case-name: twisted._threads.test.test_team -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Implementation of a L{Team} of workers; a thread-pool that can allocate work
to workers.
"""

from __future__ import absolute_import, division, print_function

from collections import deque
from zope.interface import implementer

from . import IWorker
from ._convenience import Quit



class Statistics(object):
    """
    Statistics about a L{Team}'s current activity.

    @ivar idleWorkerCount: The number of idle workers.
    @type idleWorkerCount: L{int}

    @ivar busyWorkerCount: The number of busy workers.
    @type busyWorkerCount: L{int}

    @ivar backloggedWorkCount: The number of work items passed to L{Team.do}
        which have not yet been sent to a worker to be performed because not
        enough workers are available.
    @type backloggedWorkCount: L{int}
    """

    def __init__(self, idleWorkerCount, busyWorkerCount,
                 backloggedWorkCount):
        self.idleWorkerCount = idleWorkerCount
        self.busyWorkerCount = busyWorkerCount
        self.backloggedWorkCount = backloggedWorkCount



@implementer(IWorker)
class Team(object):
    """
    A composite L{IWorker} implementation.

    @ivar _quit: A L{Quit} flag indicating whether this L{Team} has been quit
        yet.  This may be set by an arbitrary thread since L{Team.quit} may be
        called from anywhere.

    @ivar _coordinator: the L{IExclusiveWorker} coordinating access to this
        L{Team}'s internal resources.

    @ivar _createWorker: a callable that will create new workers.

    @ivar _logException: a 0-argument callable called in an exception context
        when there is an unhandled error from a task passed to L{Team.do}

    @ivar _idle: a L{set} of idle workers.

    @ivar _busyCount: the number of workers currently busy.

    @ivar _pending: a C{deque} of tasks - that is, 0-argument callables passed
        to L{Team.do} - that are outstanding.

    @ivar _shouldQuitCoordinator: A flag indicating that the coordinator
        should be quit at the next available opportunity.  Unlike
        L{Team._quit}, this flag is only set by the coordinator.

    @ivar _toShrink: the number of workers to shrink this L{Team} by at the
        next available opportunity; set in the coordinator.
    """

    def __init__(self, coordinator, createWorker, logException):
        """
        @param coordinator: an L{IExclusiveWorker} which will coordinate
            access to resources on this L{Team}; that is to say, an
            L{IExclusiveWorker} whose C{do} method ensures that its given work
            will be executed in a mutually exclusive context, not in parallel
            with other work enqueued by C{do} (although possibly in parallel
            with the caller).

        @param createWorker: A 0-argument callable that will create an
            L{IWorker} to perform work.

        @param logException: A 0-argument callable called in an exception
            context when the work passed to C{do} raises an exception.
        """
        self._quit = Quit()
        self._coordinator = coordinator
        self._createWorker = createWorker
        self._logException = logException

        # Don't touch these except from the coordinator.
        self._idle = set()
        self._busyCount = 0
        self._pending = deque()
        self._shouldQuitCoordinator = False
        self._toShrink = 0


    def statistics(self):
        """
        Gather information on the current status of this L{Team}.

        @return: a L{Statistics} describing the current state of this L{Team}.
        """
        return Statistics(len(self._idle), self._busyCount, len(self._pending))


    def grow(self, n):
        """
        Increase the number of idle workers by C{n}.

        @param n: The number of new idle workers to create.
        @type n: L{int}
        """
        self._quit.check()
        @self._coordinator.do
        def createOneWorker():
            for x in range(n):
                worker = self._createWorker()
                if worker is None:
                    return
                self._recycleWorker(worker)


    def shrink(self, n=None):
        """
        Decrease the number of idle workers by C{n}.

        @param n: The number of idle workers to shut down, or L{None} (or
            unspecified) to shut down all workers.
        @type n: L{int} or L{None}
        """
        self._quit.check()
        self._coordinator.do(lambda: self._quitIdlers(n))


    def _quitIdlers(self, n=None):
        """
        The implementation of C{shrink}, performed by the coordinator worker.

        @param n: see L{Team.shrink}
        """
        if n is None:
            n = len(self._idle) + self._busyCount
        for x in range(n):
            if self._idle:
                self._idle.pop().quit()
            else:
                self._toShrink += 1
        if self._shouldQuitCoordinator and self._busyCount == 0:
            self._coordinator.quit()


    def do(self, task):
        """
        Perform some work in a worker created by C{createWorker}.

        @param task: the callable to run
        """
        self._quit.check()
        self._coordinator.do(lambda: self._coordinateThisTask(task))


    def _coordinateThisTask(self, task):
        """
        Select a worker to dispatch to, either an idle one or a new one, and
        perform the given task on it.

        This method should run on the coordinator worker.

        @param task: the task to dispatch
        @type task: 0-argument callable
        """
        worker = (self._idle.pop() if self._idle
                  else self._createWorker())
        if worker is None:
            # The createWorker method may return None if we're out of
            # resources to create workers.
            self._pending.append(task)
            return
        self._busyCount += 1
        @worker.do
        def doWork():
            try:
                task()
            except:
                self._logException()

            @self._coordinator.do
            def idleAndPending():
                self._busyCount -= 1
                self._recycleWorker(worker)


    def _recycleWorker(self, worker):
        """
        Called only from the coordinator.

        Recycle the given worker into the idle pool.

        @param worker: a worker created by C{createWorker} and now idle.
        @type worker: L{IWorker}
        """
        self._idle.add(worker)
        if self._pending:
            # Re-try the first enqueued thing.
            # (Explicitly do _not_ honor _quit.)
            self._coordinateThisTask(self._pending.popleft())
        elif self._shouldQuitCoordinator:
            self._quitIdlers()
        elif self._toShrink > 0:
            self._toShrink -= 1
            self._idle.remove(worker)
            worker.quit()


    def quit(self):
        """
        Stop doing work and shut down all idle workers.
        """
        self._quit.set()
        # In case all the workers are idle when we do this.
        @self._coordinator.do
        def startFinishing():
            self._shouldQuitCoordinator = True
            self._quitIdlers()
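The test module further down drives a Team entirely with in-memory workers; the following is a condensed sketch of that pattern (names other than the imported ones are illustrative), showing how the coordinator and the per-worker performers are stepped by hand.

from twisted._threads import Team, createMemoryWorker
from twisted.python.failure import Failure

coordinator, coordinate = createMemoryWorker()
performers = []

def createWorker():
    worker, perform = createMemoryWorker()
    performers.append(perform)
    return worker

team = Team(coordinator, createWorker, lambda: print(Failure()))
team.do(lambda: print("some task"))
while coordinate():          # run the coordinator's queued bookkeeping
    pass
for perform in performers:   # run the task on the (memory) worker
    perform()
while coordinate():          # let the coordinator see the worker go idle again
    pass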
@@ -0,0 +1,123 @@
# -*- test-case-name: twisted._threads.test.test_threadworker -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Implementation of an L{IWorker} based on native threads and queues.
"""

from __future__ import absolute_import, division, print_function

from zope.interface import implementer
from ._ithreads import IExclusiveWorker
from ._convenience import Quit


_stop = object()

@implementer(IExclusiveWorker)
class ThreadWorker(object):
    """
    An L{IExclusiveWorker} implemented based on a single thread and a queue.

    This worker ensures exclusivity (i.e. it is an L{IExclusiveWorker} and not
    an L{IWorker}) by performing all of the work passed to C{do} on the
    I{same} thread.
    """

    def __init__(self, startThread, queue):
        """
        Create a L{ThreadWorker} with a function to start a thread and a queue
        to use to communicate with that thread.

        @param startThread: a callable that takes a callable to run in another
            thread.
        @type startThread: callable taking a 0-argument callable and returning
            nothing.

        @param queue: A L{Queue} to use to give tasks to the thread created by
            C{startThread}.
        @type queue: L{Queue}
        """
        self._q = queue
        self._hasQuit = Quit()
        def work():
            for task in iter(queue.get, _stop):
                task()
        startThread(work)


    def do(self, task):
        """
        Perform the given task on the thread owned by this L{ThreadWorker}.

        @param task: the function to call on a thread.
        """
        self._hasQuit.check()
        self._q.put(task)


    def quit(self):
        """
        Reject all future work and stop the thread started by C{__init__}.
        """
        # Reject all future work.  Set this _before_ enqueueing _stop, so
        # that no work is ever enqueued _after_ _stop.
        self._hasQuit.set()
        self._q.put(_stop)



@implementer(IExclusiveWorker)
class LockWorker(object):
    """
    An L{IWorker} implemented based on a mutual-exclusion lock.
    """

    def __init__(self, lock, local):
        """
        @param lock: A mutual-exclusion lock, with C{acquire} and C{release}
            methods.
        @type lock: L{threading.Lock}

        @param local: Local storage.
        @type local: L{threading.local}
        """
        self._quit = Quit()
        self._lock = lock
        self._local = local


    def do(self, work):
        """
        Do the given work on this thread, with the mutex acquired.  If this is
        called re-entrantly, return and wait for the outer invocation to do
        the work.

        @param work: the work to do with the lock held.
        """
        lock = self._lock
        local = self._local
        self._quit.check()
        working = getattr(local, "working", None)
        if working is None:
            working = local.working = []
            working.append(work)
            lock.acquire()
            try:
                while working:
                    working.pop(0)()
            finally:
                lock.release()
                local.working = None
        else:
            working.append(work)


    def quit(self):
        """
        Quit this L{LockWorker}.
        """
        self._quit.set()
        self._lock = None
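For reference, _pool.py earlier in this diff wires these classes to the real standard-library primitives; a minimal standalone sketch of that same wiring (Python 3 imports assumed) looks like this.

from queue import Queue
from threading import Thread, Lock, local

from twisted._threads import ThreadWorker, LockWorker

# One dedicated thread consuming tasks from a queue.
worker = ThreadWorker(lambda target: Thread(target=target).start(), Queue())
worker.do(lambda: print("runs on the worker thread"))
worker.quit()    # enqueues the stop sentinel; the thread exits after pending work

# Mutual exclusion on the calling threads, with no dedicated thread.
locked = LockWorker(Lock(), local())
locked.do(lambda: print("runs immediately, with the lock held"))
locked.quit()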
@@ -0,0 +1,9 @@
# -*- test-case-name: twisted._threads.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted._threads}.
"""

from __future__ import absolute_import, division, print_function
@@ -0,0 +1,61 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Test cases for convenience functionality in L{twisted._threads._convenience}.
"""

from __future__ import absolute_import, division, print_function

from twisted.trial.unittest import SynchronousTestCase

from .._convenience import Quit
from .._ithreads import AlreadyQuit


class QuitTests(SynchronousTestCase):
    """
    Tests for L{Quit}
    """

    def test_isInitiallySet(self):
        """
        L{Quit.isSet} starts as L{False}.
        """
        quit = Quit()
        self.assertEqual(quit.isSet, False)


    def test_setSetsSet(self):
        """
        L{Quit.set} sets L{Quit.isSet} to L{True}.
        """
        quit = Quit()
        quit.set()
        self.assertEqual(quit.isSet, True)


    def test_checkDoesNothing(self):
        """
        L{Quit.check} initially does nothing and returns L{None}.
        """
        quit = Quit()
        self.assertIs(quit.check(), None)


    def test_checkAfterSetRaises(self):
        """
        L{Quit.check} raises L{AlreadyQuit} if L{Quit.set} has been called.
        """
        quit = Quit()
        quit.set()
        self.assertRaises(AlreadyQuit, quit.check)


    def test_setTwiceRaises(self):
        """
        L{Quit.set} raises L{AlreadyQuit} if it has been called previously.
        """
        quit = Quit()
        quit.set()
        self.assertRaises(AlreadyQuit, quit.set)
@@ -0,0 +1,65 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted._threads._memory}.
"""
from __future__ import absolute_import, division, print_function

from zope.interface.verify import verifyObject

from twisted.trial.unittest import SynchronousTestCase
from .. import AlreadyQuit, IWorker, createMemoryWorker



class MemoryWorkerTests(SynchronousTestCase):
    """
    Tests for L{MemoryWorker}.
    """

    def test_createWorkerAndPerform(self):
        """
        L{createMemoryWorker} creates an L{IWorker} and a callable that can
        perform work on it.  The performer returns C{True} if it accomplished
        useful work.
        """
        worker, performer = createMemoryWorker()
        verifyObject(IWorker, worker)
        done = []
        worker.do(lambda: done.append(3))
        worker.do(lambda: done.append(4))
        self.assertEqual(done, [])
        self.assertEqual(performer(), True)
        self.assertEqual(done, [3])
        self.assertEqual(performer(), True)
        self.assertEqual(done, [3, 4])


    def test_quitQuits(self):
        """
        Calling C{quit} on the worker returned by L{createMemoryWorker} causes
        its C{do} and C{quit} methods to raise L{AlreadyQuit}; its C{perform}
        callable returns C{False} once the work already provided to C{do} has
        been exhausted.
        """
        worker, performer = createMemoryWorker()
        done = []
        def moreWork():
            done.append(7)
        worker.do(moreWork)
        worker.quit()
        self.assertRaises(AlreadyQuit, worker.do, moreWork)
        self.assertRaises(AlreadyQuit, worker.quit)
        performer()
        self.assertEqual(done, [7])
        self.assertEqual(performer(), False)


    def test_performWhenNothingToDoYet(self):
        """
        The C{perform} callable returned by L{createMemoryWorker} returns
        C{False} when there is no work to do yet.
        """
        worker, performer = createMemoryWorker()
        self.assertEqual(performer(), False)
@@ -0,0 +1,290 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted._threads._team}.
"""

from __future__ import absolute_import, division, print_function

from twisted.trial.unittest import SynchronousTestCase

from twisted.python.context import call, get
from twisted.python.components import proxyForInterface

from twisted.python.failure import Failure
from .. import IWorker, Team, createMemoryWorker, AlreadyQuit

class ContextualWorker(proxyForInterface(IWorker, "_realWorker")):
    """
    A worker implementation that supplies a context.
    """

    def __init__(self, realWorker, **ctx):
        """
        Create with a real worker and a context.
        """
        self._realWorker = realWorker
        self._context = ctx


    def do(self, work):
        """
        Perform the given work with the context given to __init__.

        @param work: the work to pass on to the real worker.
        """
        super(ContextualWorker, self).do(lambda: call(self._context, work))



class TeamTests(SynchronousTestCase):
    """
    Tests for L{Team}
    """

    def setUp(self):
        """
        Set up a L{Team} with inspectable, synchronous workers that can be
        single-stepped.
        """
        coordinator, self.coordinateOnce = createMemoryWorker()
        self.coordinator = ContextualWorker(coordinator, worker="coordinator")
        self.workerPerformers = []
        self.allWorkersEver = []
        self.allUnquitWorkers = []
        self.activePerformers = []
        self.noMoreWorkers = lambda: False

        def createWorker():
            if self.noMoreWorkers():
                return None
            worker, performer = createMemoryWorker()
            self.workerPerformers.append(performer)
            self.activePerformers.append(performer)
            cw = ContextualWorker(worker, worker=len(self.workerPerformers))
            self.allWorkersEver.append(cw)
            self.allUnquitWorkers.append(cw)
            realQuit = cw.quit
            def quitAndRemove():
                realQuit()
                self.allUnquitWorkers.remove(cw)
                self.activePerformers.remove(performer)
            cw.quit = quitAndRemove
            return cw

        self.failures = []
        def logException():
            self.failures.append(Failure())
        self.team = Team(coordinator, createWorker, logException)


    def coordinate(self):
        """
        Perform all work currently scheduled in the coordinator.

        @return: whether any coordination work was performed; if the
            coordinator was idle when this was called, return L{False}
            (otherwise L{True}).
        @rtype: L{bool}
        """
        did = False
        while self.coordinateOnce():
            did = True
        return did


    def performAllOutstandingWork(self):
        """
        Perform all work on the coordinator and worker performers that needs
        to be done.
        """
        continuing = True
        while continuing:
            continuing = self.coordinate()
            for performer in self.workerPerformers:
                if performer in self.activePerformers:
                    performer()
            continuing = continuing or self.coordinate()


    def test_doDoesWorkInWorker(self):
        """
        L{Team.do} does the work in a worker created by the createWorker
        callable.
        """
        def something():
            something.who = get("worker")
        self.team.do(something)
        self.coordinate()
        self.assertEqual(self.team.statistics().busyWorkerCount, 1)
        self.performAllOutstandingWork()
        self.assertEqual(something.who, 1)
        self.assertEqual(self.team.statistics().busyWorkerCount, 0)


    def test_initialStatistics(self):
        """
        L{Team.statistics} returns an object with idleWorkerCount,
        busyWorkerCount, and backloggedWorkCount integer attributes.
        """
        stats = self.team.statistics()
        self.assertEqual(stats.idleWorkerCount, 0)
        self.assertEqual(stats.busyWorkerCount, 0)
        self.assertEqual(stats.backloggedWorkCount, 0)


    def test_growCreatesIdleWorkers(self):
        """
        L{Team.grow} increases the number of available idle workers.
        """
        self.team.grow(5)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.workerPerformers), 5)


    def test_growCreateLimit(self):
        """
        L{Team.grow} increases the number of available idle workers until the
        C{createWorker} callable starts returning None.
        """
        self.noMoreWorkers = lambda: len(self.allWorkersEver) >= 3
        self.team.grow(5)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allWorkersEver), 3)
        self.assertEqual(self.team.statistics().idleWorkerCount, 3)


    def test_shrinkQuitsWorkers(self):
        """
        L{Team.shrink} will quit the given number of workers.
        """
        self.team.grow(5)
        self.performAllOutstandingWork()
        self.team.shrink(3)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 2)


    def test_shrinkToZero(self):
        """
        L{Team.shrink} with no arguments will stop all outstanding workers.
        """
        self.team.grow(10)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 10)
        self.team.shrink()
        self.assertEqual(len(self.allUnquitWorkers), 10)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 0)


    def test_moreWorkWhenNoWorkersAvailable(self):
        """
        When no additional workers are available, the given work is
        backlogged, and then performed later when a worker becomes available.
        """
        self.team.grow(3)
        self.coordinate()
        def something():
            something.times += 1
        something.times = 0
        self.assertEqual(self.team.statistics().idleWorkerCount, 3)
        for i in range(3):
            self.team.do(something)
        # Make progress on the coordinator but do _not_ actually complete the
        # work, yet.
        self.coordinate()
        self.assertEqual(self.team.statistics().idleWorkerCount, 0)
        self.noMoreWorkers = lambda: True
        self.team.do(something)
        self.coordinate()
        self.assertEqual(self.team.statistics().idleWorkerCount, 0)
        self.assertEqual(self.team.statistics().backloggedWorkCount, 1)
        self.performAllOutstandingWork()
        self.assertEqual(self.team.statistics().backloggedWorkCount, 0)
        self.assertEqual(something.times, 4)


    def test_exceptionInTask(self):
        """
        When an exception is raised in a task passed to L{Team.do}, the
        C{logException} given to the L{Team} at construction is invoked in the
        exception context.
        """
        self.team.do(lambda: 1/0)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.failures), 1)
        self.assertEqual(self.failures[0].type, ZeroDivisionError)


    def test_quit(self):
        """
        L{Team.quit} causes future invocations of L{Team.do} and L{Team.quit}
        to raise L{AlreadyQuit}.
        """
        self.team.quit()
        self.assertRaises(AlreadyQuit, self.team.quit)
        self.assertRaises(AlreadyQuit, self.team.do, list)


    def test_quitQuits(self):
        """
        L{Team.quit} causes all idle workers, as well as the coordinator
        worker, to quit.
        """
        for x in range(10):
            self.team.do(list)
        self.performAllOutstandingWork()
        self.team.quit()
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 0)
        self.assertRaises(AlreadyQuit, self.coordinator.quit)


    def test_quitQuitsLaterWhenBusy(self):
        """
        L{Team.quit} causes all busy workers to be quit once they've finished
        the work they've been given.
        """
        self.team.grow(10)
        for x in range(5):
            self.team.do(list)
        self.coordinate()
        self.team.quit()
        self.coordinate()
        self.assertEqual(len(self.allUnquitWorkers), 5)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 0)
        self.assertRaises(AlreadyQuit, self.coordinator.quit)


    def test_quitConcurrentWithWorkHappening(self):
        """
        If work happens after L{Team.quit} sets its C{Quit} flag, but before
        any other work takes place, the L{Team} should still exit gracefully.
        """
        self.team.do(list)
        originalSet = self.team._quit.set
        def performWorkConcurrently():
            originalSet()
            self.performAllOutstandingWork()
        self.team._quit.set = performWorkConcurrently
        self.team.quit()
        self.assertRaises(AlreadyQuit, self.team.quit)
        self.assertRaises(AlreadyQuit, self.team.do, list)


    def test_shrinkWhenBusy(self):
        """
        L{Team.shrink} will wait for busy workers to finish being busy and
        then quit them.
        """
        for x in range(10):
            self.team.do(list)
        self.coordinate()
        self.assertEqual(len(self.allUnquitWorkers), 10)
        # There should be 10 busy workers at this point.
        self.team.shrink(7)
        self.performAllOutstandingWork()
        self.assertEqual(len(self.allUnquitWorkers), 3)
@@ -0,0 +1,308 @@
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for L{twisted._threads._threadworker}.
"""

from __future__ import absolute_import, division, print_function

import gc
import weakref

from twisted.trial.unittest import SynchronousTestCase
from threading import ThreadError, local

from .. import ThreadWorker, LockWorker, AlreadyQuit

class FakeQueueEmpty(Exception):
    """
    L{FakeQueue}'s C{get} has exhausted the queue.
    """



class WouldDeadlock(Exception):
    """
    If this were a real lock, you'd be deadlocked because the lock would be
    double-acquired.
    """



class FakeThread(object):
    """
    A fake L{threading.Thread}.

    @ivar target: A target function to run.
    @type target: L{callable}

    @ivar started: Has this thread been started?
    @type started: L{bool}
    """

    def __init__(self, target):
        """
        Create a L{FakeThread} with a target.
        """
        self.target = target
        self.started = False


    def start(self):
        """
        Set the "started" flag.
        """
        self.started = True



class FakeQueue(object):
    """
    A fake L{Queue} implementing C{put} and C{get}.

    @ivar items: A list of items placed by C{put} but not yet retrieved by
        C{get}.
    @type items: L{list}
    """

    def __init__(self):
        """
        Create a L{FakeQueue}.
        """
        self.items = []


    def put(self, item):
        """
        Put an item into the queue for later retrieval by L{FakeQueue.get}.

        @param item: any object
        """
        self.items.append(item)


    def get(self):
        """
        Get an item.

        @return: an item previously put by C{put}.
        """
        if not self.items:
            raise FakeQueueEmpty()
        return self.items.pop(0)



class FakeLock(object):
    """
    A stand-in for L{threading.Lock}.

    @ivar acquired: Whether this lock is presently acquired.
    """

    def __init__(self):
        """
        Create a lock in the un-acquired state.
        """
        self.acquired = False


    def acquire(self):
        """
        Acquire the lock.  Raise an exception if the lock is already acquired.
        """
        if self.acquired:
            raise WouldDeadlock()
        self.acquired = True


    def release(self):
        """
        Release the lock.  Raise an exception if the lock is not presently
        acquired.
        """
        if not self.acquired:
            raise ThreadError()
        self.acquired = False



class ThreadWorkerTests(SynchronousTestCase):
    """
    Tests for L{ThreadWorker}.
    """

    def setUp(self):
        """
        Create a worker with fake threads.
        """
        self.fakeThreads = []
        self.fakeQueue = FakeQueue()
        def startThread(target):
            newThread = FakeThread(target=target)
            newThread.start()
            self.fakeThreads.append(newThread)
            return newThread
        self.worker = ThreadWorker(startThread, self.fakeQueue)


    def test_startsThreadAndPerformsWork(self):
        """
        L{ThreadWorker} starts a thread with the C{startThread} callable it is
        given, and that thread's target pulls work from the given queue.
        """
        self.assertEqual(len(self.fakeThreads), 1)
        self.assertEqual(self.fakeThreads[0].started, True)
        def doIt():
            doIt.done = True
        doIt.done = False
        self.worker.do(doIt)
        self.assertEqual(doIt.done, False)
        self.assertRaises(FakeQueueEmpty, self.fakeThreads[0].target)
        self.assertEqual(doIt.done, True)


    def test_quitPreventsFutureCalls(self):
        """
        L{ThreadWorker.quit} causes future calls to L{ThreadWorker.do} and
        L{ThreadWorker.quit} to raise L{AlreadyQuit}.
        """
        self.worker.quit()
        self.assertRaises(AlreadyQuit, self.worker.quit)
        self.assertRaises(AlreadyQuit, self.worker.do, list)



class LockWorkerTests(SynchronousTestCase):
    """
    Tests for L{LockWorker}.
    """

    def test_fakeDeadlock(self):
        """
        The L{FakeLock} test fixture will alert us if there's a potential
        deadlock.
        """
        lock = FakeLock()
        lock.acquire()
        self.assertRaises(WouldDeadlock, lock.acquire)


    def test_fakeDoubleRelease(self):
        """
        The L{FakeLock} test fixture will alert us if there's a potential
        double-release.
        """
        lock = FakeLock()
        self.assertRaises(ThreadError, lock.release)
        lock.acquire()
        self.assertEqual(None, lock.release())
        self.assertRaises(ThreadError, lock.release)


    def test_doExecutesImmediatelyWithLock(self):
        """
        L{LockWorker.do} immediately performs the work it's given, while the
        lock is acquired.
        """
        storage = local()
        lock = FakeLock()
        worker = LockWorker(lock, storage)
        def work():
            work.done = True
            work.acquired = lock.acquired
        work.done = False
        worker.do(work)
        self.assertEqual(work.done, True)
        self.assertEqual(work.acquired, True)
        self.assertEqual(lock.acquired, False)


    def test_doUnwindsReentrancy(self):
        """
        If L{LockWorker.do} is called recursively, it postpones the inner call
        until the outer one is complete.
        """
        lock = FakeLock()
        worker = LockWorker(lock, local())
        levels = []
        acquired = []
        def work():
            work.level += 1
            levels.append(work.level)
            acquired.append(lock.acquired)
            if len(levels) < 2:
                worker.do(work)
            work.level -= 1
        work.level = 0
        worker.do(work)
        self.assertEqual(levels, [1, 1])
        self.assertEqual(acquired, [True, True])


    def test_quit(self):
        """
        L{LockWorker.quit} frees the resources associated with its lock and
        causes further calls to C{do} and C{quit} to fail.
        """
        lock = FakeLock()
        ref = weakref.ref(lock)
        worker = LockWorker(lock, local())
        lock = None
        self.assertIsNot(ref(), None)
        worker.quit()
        gc.collect()
        self.assertIs(ref(), None)
        self.assertRaises(AlreadyQuit, worker.quit)
        self.assertRaises(AlreadyQuit, worker.do, list)


    def test_quitWhileWorking(self):
        """
        If L{LockWorker.quit} is invoked during a call to L{LockWorker.do},
        all recursive work scheduled with L{LockWorker.do} will be completed
        and the lock will be released.
        """
        lock = FakeLock()
        ref = weakref.ref(lock)
        worker = LockWorker(lock, local())

        def phase1():
            worker.do(phase2)
            worker.quit()
            self.assertRaises(AlreadyQuit, worker.do, list)
            phase1.complete = True
        phase1.complete = False
        def phase2():
            phase2.complete = True
            phase2.acquired = lock.acquired
        phase2.complete = False
        worker.do(phase1)
        self.assertEqual(phase1.complete, True)
        self.assertEqual(phase2.complete, True)
        self.assertEqual(lock.acquired, False)
        lock = None
        gc.collect()
        self.assertIs(ref(), None)


    def test_quitWhileGettingLock(self):
        """
        If L{LockWorker.do} is called concurrently with L{LockWorker.quit},
        and C{quit} wins the race before C{do} gets the lock attribute, then
        L{AlreadyQuit} will be raised.
        """
        class RacyLockWorker(LockWorker):
            def _lock_get(self):
                self.quit()
                return self.__dict__['_lock']

            def _lock_set(self, value):
                self.__dict__['_lock'] = value

            _lock = property(_lock_get, _lock_set)

        worker = RacyLockWorker(FakeLock(), local())
        self.assertRaises(AlreadyQuit, worker.do, list)