[Checkins] SVN: zc.async/trunk/ svn merge -r84656:85211 svn+ssh://svn.zope.org/repos/main/zc.async/branches/dev

Gary Poster gary at zope.com
Wed Apr 9 23:21:09 EDT 2008


Log message for revision 85212:
  svn merge -r84656:85211 svn+ssh://svn.zope.org/repos/main/zc.async/branches/dev

Changed:
  _U  zc.async/trunk/
  A   zc.async/trunk/CHANGES.txt
  U   zc.async/trunk/buildout.cfg
  U   zc.async/trunk/setup.py
  A   zc.async/trunk/src/zc/async/CHANGES.txt
  U   zc.async/trunk/src/zc/async/README.txt
  A   zc.async/trunk/src/zc/async/README_2.txt
  A   zc.async/trunk/src/zc/async/README_3.txt
  U   zc.async/trunk/src/zc/async/TODO.txt
  U   zc.async/trunk/src/zc/async/__init__.py
  U   zc.async/trunk/src/zc/async/adapters.py
  A   zc.async/trunk/src/zc/async/agent.py
  A   zc.async/trunk/src/zc/async/agent.txt
  A   zc.async/trunk/src/zc/async/basic_dispatcher_policy.zcml
  A   zc.async/trunk/src/zc/async/configure.py
  A   zc.async/trunk/src/zc/async/configure.zcml
  D   zc.async/trunk/src/zc/async/datamanager.py
  D   zc.async/trunk/src/zc/async/datamanager.txt
  A   zc.async/trunk/src/zc/async/dispatcher.py
  A   zc.async/trunk/src/zc/async/dispatcher.txt
  A   zc.async/trunk/src/zc/async/dispatcher.zcml
  D   zc.async/trunk/src/zc/async/engine.py
  U   zc.async/trunk/src/zc/async/instanceuuid.py
  U   zc.async/trunk/src/zc/async/interfaces.py
  A   zc.async/trunk/src/zc/async/job.py
  A   zc.async/trunk/src/zc/async/job.txt
  A   zc.async/trunk/src/zc/async/jobs_and_transactions.txt
  A   zc.async/trunk/src/zc/async/monitor.py
  A   zc.async/trunk/src/zc/async/monitor.txt
  A   zc.async/trunk/src/zc/async/multidb_dispatcher_policy.zcml
  D   zc.async/trunk/src/zc/async/partial.py
  D   zc.async/trunk/src/zc/async/partial.txt
  D   zc.async/trunk/src/zc/async/partials_and_transactions.txt
  A   zc.async/trunk/src/zc/async/queue.py
  A   zc.async/trunk/src/zc/async/queue.txt
  D   zc.async/trunk/src/zc/async/rwproperty.py
  D   zc.async/trunk/src/zc/async/rwproperty.txt
  U   zc.async/trunk/src/zc/async/subscribers.py
  A   zc.async/trunk/src/zc/async/subscribers.txt
  A   zc.async/trunk/src/zc/async/testing.py
  U   zc.async/trunk/src/zc/async/tests.py
  A   zc.async/trunk/src/zc/async/utils.py
  A   zc.async/trunk/src/zc/async/z3tests.py

-=-

Property changes on: zc.async/trunk
___________________________________________________________________
Name: svn:externals
   + bootstrap svn://svn.zope.org/repos/main/zc.buildout/trunk/bootstrap


Copied: zc.async/trunk/CHANGES.txt (from rev 85211, zc.async/branches/dev/CHANGES.txt)
===================================================================
--- zc.async/trunk/CHANGES.txt	                        (rev 0)
+++ zc.async/trunk/CHANGES.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1 @@
+Please see CHANGES.txt in the zc.async package.
\ No newline at end of file

Modified: zc.async/trunk/buildout.cfg
===================================================================
--- zc.async/trunk/buildout.cfg	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/buildout.cfg	2008-04-10 03:21:01 UTC (rev 85212)
@@ -2,21 +2,32 @@
 parts =
     interpreter
     test
+    z3interpreter
+    z3test
 
 develop = .
 
 find-links =
     http://download.zope.org/distribution
 
-extensions = zc.buildoutsftp
-
 [test]
 recipe = zc.recipe.testrunner
 eggs = zc.async
-defaults = '--tests-pattern [fn]?tests --exit-with-status -1'.split()
+defaults = '--tests-pattern ^[fn]?tests --exit-with-status -1'.split()
 working-directory = ${buildout:directory}
 
+[z3test]
+recipe = zc.recipe.testrunner
+eggs = zc.async [z3]
+defaults = "--tests-pattern z3tests --exit-with-status -1".split()
+
+
 [interpreter]
 recipe = zc.recipe.egg
 eggs = zc.async
 interpreter = py
+
+[z3interpreter]
+recipe = zc.recipe.egg
+eggs = zc.async [z3]
+interpreter = z3py

Modified: zc.async/trunk/setup.py
===================================================================
--- zc.async/trunk/setup.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/setup.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -2,16 +2,27 @@
 
 from setuptools import setup, find_packages
 
+long_description = (
+    open('src/zc/async/README.txt').read() + "\n" +
+    open('src/zc/async/README_2.txt').read() + "\n" +
+    open('src/zc/async/README_3.txt').read() +
+    "\n\n=======\nChanges\n=======\n\n" +
+    open('src/zc/async/CHANGES.txt').read() + "\n")
+
+f = open('TEST_THIS_REST_BEFORE_REGISTERING.txt', 'w')
+f.write(long_description)
+f.close()
+
 setup(
     name='zc.async',
-    version='0.1',
+    version='1.0',
     packages=find_packages('src'),
     package_dir={'':'src'},
-
-    url='http://svn.zope.org/zc.async',
     zip_safe=False,
-    author='Gary Poster',
+    author='Zope Project',
+    author_email='zope-dev at zope.org',
     description='Perform durable tasks asynchronously',
+    long_description=long_description,
     license='ZPL',
     install_requires=[
         'ZODB3',
@@ -19,15 +30,22 @@
         'rwproperty',
         'uuid',
         'zc.queue',
-        'zc.set',
-        'zc.twist',
-        'zc.twisted',
-        'zope.app.appsetup',
-        'zope.bforest',
+        'zc.dict>=1.2.1',
+        'zc.twist>=1.2',
+        'Twisted>=8.0.1', # 8.0 was setuptools compatible, 8.0.1 had bugfixes.
+        # note that Twisted builds with warnings, at least with py2.4.  It
+        # seems to still build ok.
+        'zope.bforest>=1.1.1',
         'zope.component',
         'zope.i18nmessageid',
         'zope.interface',
         'zope.testing',
         ],
+    extras_require={
+        'z3':[
+            'zc.z3monitor',
+            'simplejson',
+            ]},
     include_package_data=True,
     )

Copied: zc.async/trunk/src/zc/async/CHANGES.txt (from rev 85211, zc.async/branches/dev/src/zc/async/CHANGES.txt)
===================================================================
--- zc.async/trunk/src/zc/async/CHANGES.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/CHANGES.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,4 @@
+1.0 (2008-04-09)
+================
+
+Initial release.
\ No newline at end of file

Modified: zc.async/trunk/src/zc/async/README.txt
===================================================================
--- zc.async/trunk/src/zc/async/README.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/README.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,1005 +1,1219 @@
-========
+~~~~~~~~
 zc.async
-========
+~~~~~~~~
 
+.. contents::
+
+============
+Introduction
+============
+
 Goals
 =====
 
-The zc.async package provides a way to make scalable asynchronous application
-calls.  Here are some example core use cases.
+The zc.async package provides a way to schedule jobs, particularly
+those working within the context of the ZODB, to be performed
+out-of-band from your current thread.  The job might be done in another
+thread or another process.  Here are some example core use cases.
 
-- You want to let users create PDFs through your application.  This can take
-  quite a bit of time, and will use both system resources and one of the
-  precious application threads until it is done.  Naively done, six or seven
-  simultaneous PDF requests could make your application unresponsive to any
-  other users.  Using zc.async, the job can be sent to other machines; you
-  can write AJAX to poll for job completion, or you might have the end of the
-  job send an email to deliver the PDF.
+- You want to let users do something that requires a lot of system
+  resources from your application, such as creating a large PDF.  Naively
+  done, six or seven simultaneous PDF requests will consume your
+  application thread pool and could make your application unresponsive to
+  any other users.
 
 - You want to let users spider a web site; communicate with a credit card
   company; query a large, slow LDAP database on another machine; or do
-  some other action that generates network requests from the server. 
-  Again, if something goes wrong, several requests could make your
-  application unresponsive.  With zc.async, this could be serialized,
-  with objects indicating that a spidering is in-progress; and performed
-  out of the main application threads.
+  some other action that generates network requests from the server.
+  System resources might not be a problem, but, again, if something goes
+  wrong, several requests could make your application unresponsive.
 
+- Perhaps because of excessive conflict errors, you want to serialize work
+  that can be done asynchronously, such as updating a single data structure
+  like a catalog index.
+
+- You want to decompose and parallelize a single job across many machines so
+  it can be finished faster.
+
 - You have an application job in the ZODB that you discover is taking
   longer than users can handle, even after you optimize it.  You want a
   quick fix to move the work out-of-band.
 
-A common thread with these core use cases is that end-users need to be
-able to start expensive processes on demand.  These are not scheduled
-tasks. You may want to have a bank of workers that can perform the task.
-You may want to be able to quickly and easily convert an application
-server process to a worker process, or vice versa, on the basis of
-traffic and demand.
+Many of these core use cases involve end-users being able to start potentially
+expensive processes, on demand. Basic scheduled tasks are also provided by this
+package, though recurrence must be something you arrange.
 
-Another Zope 3 package that approaches somewhat similar use cases to
-these is lovely.remotetask (http://svn.zope.org/lovely.remotetask/). 
-There appear to be some notable differences; describing them
-authoritatively and without possible misrepresentation will have to
-wait for another day.
+Multiple processes can claim and perform jobs.  Jobs can be (manually)
+decomposed for serial or parallel processing of the component parts.
 
-Another set of use cases center around scheduling: we need to retry an
-asynchronous task after a given amount of time; or we want to have a
-requested job happen late at night; or we want to have a job happen
-regularly.  The Zope 3 scheduler
-(http://svn.zope.org/Zope3/trunk/src/scheduler/) approaches the last of
-these tasks with more infrastructure than zc.async, as arguably does a
-typical "cron wget" approach.  However, both approaches are prone to
-serious problems when the scheduled task takes more time than expected,
-and one instance of a task overlaps the previous one, sometimes causing
-disastrous problems.  By using zc.async partials to represent the
-pending result, and even to schedule the next call, this problem can be
-alleviated.
-
 History
 =======
 
 This is a second-generation design.  The first generation was `zasync`,
 a mission-critical and successful Zope 2 product in use for a number of
-high-volume Zope 2 installations.  It had the following goals:
+high-volume Zope 2 installations.  [#history]_ It's worthwhile noting
+that zc.async has absolutely no backwards compatibility with zasync.
 
-- be scalable, so that another process or machine could do the asynchronous
-  work;
+Design Overview
+===============
 
-- support lengthy jobs outside of the ZODB;
+---------------
+Overview: Usage
+---------------
 
-- support lengthy jobs inside the ZODB;
+Looking at the design from the perspective of regular usage, your code
+obtains a ``queue``, which is a place to queue jobs to be performed
+asynchronously.  Your application calls ``put`` on the queue to register
+a job.  The job must be a pickleable callable: a global function, a
+callable persistent object, a method of a persistent object, or a
+special zc.async.job.Job object, discussed later.  The job by default is
+registered to be performed as soon as possible, but can be registered to
+be called at a certain time.
 
-- be recoverable, so that crashes would not lose work;
+The ``put`` call will return a zc.async.job.Job object.  This
+object represents both the callable and its deferred result.  It has
+information about the job requested, and the state and result of
+performing the job.  An example spelling for registering a job might be
+``self.pending_result = queue.put(self.performSpider)``.  The returned
+object can be simply persisted and polled to see when the job
+is complete; or it can be configured to do additional work when it
+completes.
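+
+For illustration, the poll-for-completion pattern might look like this
+sketch (``do_something_with`` is a hypothetical application function)::
+
+    job = queue.put(self.performSpider)
+    transaction.commit()
+    # ...later, in another request...
+    transaction.begin()  # syncs the connection with the database
+    if job.status == zc.async.interfaces.COMPLETED:
+        do_something_with(job.result)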
 
-- be discoverable, so that logs and web interfaces give a view into the work
-  being done asynchronously;
+-------------------
+Overview: Mechanism
+-------------------
 
-- be easily extendible, to do new jobs; and
+Multiple processes, typically spread across multiple machines, can use
+ZEO to connect to the queue and claim and perform work.  As with other
+collections of processes that share a database with ZEO, these processes
+generally should share the same software (though some variations on this
+constraint should be possible).
 
-- support graceful job expiration and cancellation.
+A process that should claim and perform work, in addition to a database
+connection and the necessary software, needs a ``dispatcher`` with a
+``reactor`` to provide a heartbeat.  The dispatcher will rely on one or more
+persistent ``agents`` in the queue (in the database) to determine which jobs
+it should perform.
 
-It met its goals well in some areas and adequately in others.
+A ``dispatcher`` is in charge of dispatching queued work for a given
+process to worker threads.  It works with one or more queues and a
+single reactor.  It has a universally unique identifier (UUID), which is
+usually an identifier of the application instance in which it is
+running.  The dispatcher starts jobs in dedicated threads.
 
-Based on experience with the first generation, this second generation
-identifies several areas of improvement from the first design, and adds
-several goals.
+A ``reactor`` is something that can provide an eternal loop, or heartbeat,
+to power the dispatcher.  It can be the main twisted reactor (in the
+main thread); another instance of a twisted reactor (in a child thread);
+or any object that implements a small subset of the twisted reactor
+interface (see discussion in dispatcher.txt, and example testing reactor in
+testing.py, used below).
 
-- Improvements
+An ``agent`` is a persistent object in a queue that is associated with a
+dispatcher and is responsible for picking jobs and keeping track of
+them. Zero or more agents within a queue can be associated with a
+dispatcher.  Each agent for a given dispatcher in a given queue is
+identified uniquely with a name [#identifying_agent]_.
 
-  * More carefully delineate the roles of the comprising components.
+Generally, these work together as follows.  The reactor calls the
+dispatcher. The dispatcher tries to find the mapping of queues in the
+database root under a key of ``zc.async`` (see constant
+zc.async.interfaces.KEY).  If it finds the mapping, it iterates
+over the queues (the mapping's values) and asks each queue for the
+agents associated with the dispatcher's UUID.  The dispatcher then is
+responsible for seeing what jobs its agents want to do from the queue,
+and providing threads and connections for the work to be done.  The
+dispatcher then asks the reactor to call itself again in a few seconds.
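+
+In rough pseudocode (``agents_for``, ``run_in_thread``, and ``poll`` are
+hypothetical helpers, and the method spellings are illustrative, not the
+actual implementation), each beat of the heartbeat does something like
+this::
+
+    queues = conn.root().get(zc.async.interfaces.KEY)
+    if queues is not None:
+        for queue in queues.values():
+            for agent in agents_for(queue, dispatcher_uuid):
+                job = agent.claimJob()  # the agent picks, per its policy
+                if job is not None:
+                    run_in_thread(job)  # each job gets its own connection
+    reactor.callLater(poll_interval, poll)  # schedule the next beat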
 
-    The zasync design has three main components, as divided by their
-    roles: persistent deferreds, now called partials; persistent
-    deferred queues (the original zasync's "asynchronous call manager");
-    and asynchronous workers (the original zasync ZEO client).  The
-    zasync 1.x design blurred the lines between the three components
-    such that the component parts could only be replaced with
-    difficulty, if at all. A goal for the 2.x design is to clearly
-    define the role for each of three components such that, for
-    instance, a user of a persistent deferred does not need to know
-    about the persistent deferred queue.
+Reading More
+============
 
-  * Improve scalability of asynchronous workers.
+This document continues on with three other main sections: `Usage`_,
+`Configuration without Zope 3`_, and `Configuration with Zope 3`_.
 
-    The 1.x line was initially designed for a single asynchronous worker,
-    which could be put on another machine thanks to ZEO.  Tarek ZiadŽ of
-    Nuxeo wrote zasyncdispatcher, which allowed multiple asynchronous workers
-    to accept work, allowing multiple processes and multiple machines to
-    divide and conquer. It worked around the limitations of the original
-    zasync design to provide even more scalability. However, it was forced to
-    divide up work well before a given worker looks at the queue.
+Other documents in the package are intended primarily as maintainer
+documentation, though the author has tried to make them readable and
+understandable.
 
-    While dividing work earlier allows guesses and heuristics a chance to
-    predict what worker might be more free in the future, a more reliable
-    approach is to let the worker gauge whether it should take a job at the
-    time the job is taken. Perhaps the worker will choose based on the
-    worker's load, or other concurrent jobs in the process, or other details.
-    A goal for the 2.x line is to more directly support this type of
-    scalability.
+=====
+Usage
+=====
 
-  * Improve scalability of registering deferreds.
+Overview and Basics
+===================
 
-    The 1.x line initially wasn't concerned about very many concurrent
-    asynchronous requests.  When this situation was encountered, it caused
-    ConflictErrors between the worker process reading the deferred queue
-    and the code that was adding the deferreds.  Thanks to Nuxeo, this
-    problem was addressed in the 1.x line.  A goal for the new version
-    is to include and improve upon the 1.x solution.
+The basic usage of zc.async does not depend on a particular configuration
+of the back-end mechanism for getting the jobs done.  Moreover, on some
+teams, configuring zc.async will be the responsibility of one person or
+group, while the resulting service will be available to the code of all
+team members.  Therefore,
+we begin our detailed discussion with regular usage, assuming configuration
+has already happened.  Subsequent sections discuss configuring zc.async
+with and without Zope 3.
 
-  * Make it even simpler to provide new jobs.
+So, let's assume we have a queue installed into a ZODB, with hidden
+dispatchers, reactors and agents all waiting to fulfill jobs placed into
+the queue.  We start with a connection object, ``conn``, and some
+convenience functions introduced along the way that help us simulate
+time passing and work being done [#usageSetUp]_.
 
-    In the first version, `plugins` performed jobs.  They had a specific
-    API and they had to be configured.  A goal for the new version is to
-    require no specific API for jobs, and to not require any configuration.
+-------------------
+Obtaining the queue
+-------------------
 
-  * Improve report information, especially through the web.
+First, how do we get the queue?  Your installation may have some
+conveniences.  For instance, the Zope 3 configuration described below
+makes it possible to get the primary queue with an adaptation call like
+``zc.async.interfaces.IQueue(a_persistent_object_with_db_connection)``.
 
-    The component that the first version of zasync provided to do the
-    asynchronous work, the zasync client, provided very verbose logs of the
-    jobs done, but they were hard to read and also did not have a through-
-    the-web parallel.  Two goals for the new version are to improve the
-    usefulness of the filesystem logs and to include more complete
-    through-the-web visibility of the status of the provided asynchronous
-    clients.
+But failing that, queues are always expected to be in a zc.async.queue.Queues
+mapping found off the ZODB root in a key defined by the constant
+zc.async.interfaces.KEY.
 
-  * Make it easier to configure and start, especially for small deployments.
+    >>> import zc.async.interfaces
+    >>> zc.async.interfaces.KEY
+    'zc.async'
+    >>> root = conn.root()
+    >>> queues = root[zc.async.interfaces.KEY]
+    >>> import zc.async.queue
+    >>> isinstance(queues, zc.async.queue.Queues)
+    True
 
-    A significant barrier to experimentation and deployment of the 1.x line
-    was the difficulty in configuration.  The 1.x line relied on ZConfig
-    for zasync client configuration, demanding non-extensible
-    similar-yet-subtly-different .conf files like the Zope conf files.
-    The 2.x line plans to provide code that Zope 3 can configure to run in
-    the same process as a standard Zope 3 application.  This means that
-    development instances can start a zasync quickly and easily.  It also
-    means that processes can be reallocated on the fly during production use,
-    so that a machine being used as a zasync process can quickly be converted
-    to a web server, if needed, and vice versa.  It further means that the
-    Zope web server can be used for through-the-web reports of the current
-    zasync process state.
+As the name implies, ``queues`` is a collection of queues. As discussed later,
+it's possible to have multiple queues, as a tool to distribute and control
+work. We will assume a convention of a queue being available under the ''
+(empty string) key.
 
-- New goals
+    >>> queues.keys()
+    ['']
+    >>> queue = queues['']
 
-  * Support intermediate return calls so that jobs can report back how they
-    are doing.
+---------
+queue.put
+---------
 
-    A frequent request from users of zasync 1.x was the ability for a long-
-    running asynchronous process to report back progress to the original
-    requester.  The 2.x line addresses this with three changes:
+Now we want to actually get some work done.  The simplest case is simple
+to perform: pass a persistable callable to the queue's ``put`` method and
+commit the transaction.
 
-    + persistent deferreds are annotatable;
+    >>> def send_message():
+    ...     print "imagine this sent a message to another machine"
+    >>> job = queue.put(send_message)
+    >>> import transaction
+    >>> transaction.commit()
 
-    + persistent deferreds should not be modified in an asynchronous
-      job that does work (though they may be read);
+Note that this won't really work in an interactive session: the callable needs
+to be picklable, as discussed above, so ``send_message`` would need to be
+a module global, for instance.
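+
+As a quick sanity check of that constraint, a sketch (the pickling error
+would typically surface when the transaction commits, since that is when
+ZODB pickles the job)::
+
+    queue.put(lambda: None)  # a lambda cannot be pickled...
+    transaction.commit()     # ...so expect an error here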
 
-    + jobs can request another deferred in a synchronous process that
-      annotates the deferred with progress status or other information.
+The ``put`` call returned a job.  Now we need to wait for the job to be
+performed.  We would normally do this by really waiting.  For our
+examples, we will use a helper function called ``wait_for`` to wait for
+the job to be completed [#wait_for]_.
 
-    Because of relatively recent changes in ZODB--multi version concurrency
-    control--this simple pattern should not generate conflict errors.
+    >>> wait_for(job)
+    imagine this sent a message to another machine
 
-  * Support time-delayed calls.
+We also could have used the method of a persistent object.  Here's another
+quick example.
 
-    Retries and other use cases make time-delayed deferred calls desirable.
-    The new design supports these sort of calls.
+First we define a simple persistent.Persistent subclass and put an instance of
+it in the database [#commit_for_multidatabase]_.
 
-It's worthwhile noting that zc.async has absolutely no backwards
-comapatibility with zasync.
+    >>> import persistent
+    >>> class Demo(persistent.Persistent):
+    ...     counter = 0
+    ...     def increase(self, value=1):
+    ...         self.counter += value
+    ...
+    >>> root['demo'] = Demo()
+    >>> transaction.commit()
 
-Status
-======
+Now we can put the ``demo.increase`` method in the queue.
 
-This is alpha software.  Tests range for various components range from
-very good to preliminary.  All existing tests pass.  Even the first
-"final" release is not scheduled to meet all goals identified in the
-history discussion above.  See TODO.txt in this package.
+    >>> root['demo'].counter
+    0
+    >>> job = queue.put(root['demo'].increase)
+    >>> transaction.commit()
 
-Dependencies
-============
+    >>> wait_for(job)
+    >>> root['demo'].counter
+    1
 
-zc.async relies on the uuid module as currently found in the Python 2.5
-library (this can be used with earlier versions of Python).  See
-http://svn.python.org/view/python/trunk/Lib/uuid.py?view=auto.
+The method was called, and the persistent object modified!
 
-It currently relies on the Twisted reactor running, but does not require
-that the Twisted reactor be used for any particular task (such as being
-used as a web server, for instance).  That said, when used with Zope,
-zc.async currently requires that a Twisted server be used.  Future
-revisions may provide alternate worker engines for Medusa, as used by
-ZServer; though, of course, reactor tasks would either not be supported
-or be reduced to Medusa capabilities.
+To reiterate, only pickleable callables such as global functions and the
+methods of persistent objects can be used. This rules out, for instance,
+lambdas and other functions created dynamically. As we'll see below, the job
+instance can help us out there somewhat by offering closure-like features.
 
-zc.twist (http://svn.zope.org/zc.twist/) is used for the Twisted
-reactor/ZODB interactions.
+---------------
+Scheduled Calls
+---------------
 
-zc.set (http://svn.zope.org/zc.set/) and zc.queue
-(http://svn.zope.org/zc.queue/) are used for internal data structures.
+You can also pass a datetime.datetime to schedule a call.  A datetime
+without a timezone is considered to be in the UTC timezone.
 
-Design Overview
-===============
+    >>> t = transaction.begin()
+    >>> import datetime
+    >>> import pytz
+    >>> datetime.datetime.now(pytz.UTC)
+    datetime.datetime(2006, 8, 10, 15, 44, 33, 211, tzinfo=<UTC>)
+    >>> job = queue.put(
+    ...     send_message, begin_after=datetime.datetime(
+    ...         2006, 8, 10, 15, 56, tzinfo=pytz.UTC))
+    >>> job.begin_after
+    datetime.datetime(2006, 8, 10, 15, 56, tzinfo=<UTC>)
+    >>> transaction.commit()
+    >>> wait_for(job, attempts=2) # +5 virtual seconds
+    TIME OUT
+    >>> wait_for(job, attempts=2) # +5 virtual seconds
+    TIME OUT
+    >>> datetime.datetime.now(pytz.UTC)
+    datetime.datetime(2006, 8, 10, 15, 44, 43, 211, tzinfo=<UTC>)
 
-An application typically has a single zc.async datamanager, which is the
-object that client code will obtain to use the primary zc.async
-capabilities.  Expected spelling to get the datamanager is something
-like ``dm = zc.async.interfaces.IDataManager(context)``.  The
-datamanager is typically stored in the root of the ZODB, alongside the
-application object, with a key of 'zc.async.datamanager', but the
-adapter that obtains the data manager can be replaced to point to a
-different location.
+    >>> zc.async.testing.set_now(datetime.datetime(
+    ...     2006, 8, 10, 15, 56, tzinfo=pytz.UTC))
+    >>> wait_for(job)
+    imagine this sent a message to another machine
+    >>> datetime.datetime.now(pytz.UTC) >= job.begin_after
+    True
 
-Clients of zc.async need to identify the job they want done.  It must be
-a single pickleable callable: a global function, a callable persistent
-object, a method of a persistent object, or a special
-zc.async.partial.Partial object, discussed later.  This job should be
-passed to the `put` method of one of two queues on the datamanager: a
-`thread` queue and a `reactor` queue.
+If you set a time that has already passed, it will be run as if it had
+been set to run as soon as possible [#already_passed]_...unless the job
+has already timed out, in which case the job fails with an
+abort [#already_passed_timed_out]_.
 
-- A `thread` job is one that is to be performed in a thread with a
-  dedicated ZODB connection.  It's the simplest to use for typical
-  tasks. A thread job also may be overkill for some jobs that don't need
-  a connection constantly.  It also is not friendly to Twisted services. 
+The queue's `put` method is the essential API.  Other methods are used
+to introspect, but are not needed for basic usage.
 
-- A `reactor` job is a non-blocking job to be performed in the main thread,
-  in a call scheduled by the Twisted reactor.  It has some gotchas (see
-  zc.twist's README), but it can be good for jobs that don't need a
-  constant connection, and for jobs that can leverage Twisted code.
+But what is that result of the `put` call in the examples above?  A
+job?  What do you do with that?
 
-The `put` call will return an object that represents the job--both the
-information about the job requested, and the state and result of
-performing the job.  An example spelling for this might be
-``self.pending_result = dm.thread.put(self.performSpider)``.  The
-returned object can be simply persisted and polled to see when the job
-is complete; or it can be set to do tasks when it completes.
+Jobs
+====
 
-If nothing else is done, the call will be done as soon as possible:
-zc.async client code shouldn't typically have to worry further.  The
-client code could have specifed that the job should `begin_after` a
-certain datetime; or that the job should `begin_by` a duration after
-`begin_after` or else it will fail; or that the job should be performed
-by a certain process, or not by a certain process.
+--------
+Overview
+--------
 
-While client code can forget about the rest of the design, people
-configuring a production system using zc.async need to know a bit more.
-Calls are handled by workers, each of which is a persistent object
-matched with a software instance identified by a UUID [#uuid]_.  A
-worker is responsible for keeping track of current jobs and for
-implementing a policy for selecting jobs from the main datamanager
-queues. A worker's software instance may simultaneously perform other
-tasks (such as handle standard web requests).  Each worker is
-responsible for claiming and performing calls in its main thread or
-additional threads.  To have multiple workers on the same queue of
-tasks, share the database with ZEO. Workers are driven by `engines`,
-non-persistent objects that are alive only for a single process, and
-that are responsible for providing the heart-beat for a matched worker.
+A call to `put` returns an IJob.  The
+job represents the pending result.  This object has a lot of
+functionality that's explored in other documents in this package, and
+demonstrated a bit below, but here's a summary.  
 
-Set Up
-======
+- You can introspect, and even modify, the call and its
+  arguments.
 
-By default, zc.async expects to have an object in the root of
-the ZODB, alongside the application object, with a key of
-'zc.async.datamanager'.  The package includes subscribers to
-zope.app.appsetup.interfaces.IDatabaseOpenedEvent that sets an instance
-up in this location if one does not exist [#subscribers]_.
+- You can specify that the job should be run serially with others
+  of a given identifier.
 
-Let's assume we have a reference to a database named `db`, a connection
-named `conn`, a `root`, an application in the 'app' key [#setup]_, and a
-handler named `installerAndNotifier` [#handlers]_.  If we provide a
-handler, fire the event and examine the root, we will see the new
-datamanager.
+- You can specify other calls that should be made on the basis of the
+  result of this call.
 
-    >>> import zope.component
-    >>> import zc.async.subscribers
-    >>> zope.component.provideHandler(installerAndNotifier) # see footnotes
-    ... # for explanation of where installerAndNotifier came from, and what
-    ... # it is.
-    >>> import zope.event
-    >>> import zope.app.appsetup.interfaces
-    >>> zope.event.notify(zope.app.appsetup.interfaces.DatabaseOpened(db))
-    >>> import transaction
-    >>> t = transaction.begin()
-    >>> root['zc.async.datamanager'] # doctest: +ELLIPSIS
-    <zc.async.datamanager.DataManager object at ...>
+- You can persist a reference to it, and periodically (after syncing
+  your connection with the database, which happens whenever you begin or
+  commit a transaction) check its `state` to see if it is equal to
+  zc.async.interfaces.COMPLETED.  When it is, the call has run to
+  completion, either to success or an exception.
 
-The default adapter from persistent object to datamanager will get us
-the same result; adapting a persistent object to IDataManager is the
-preferred spelling.
+- You can look at the result of the call (once COMPLETED).  It might be
+  the result you expect, or a zc.twist.Failure, which is a
+  subclass of twisted.python.failure.Failure: a way to safely communicate
+  exceptions across connections, machines, and processes.
 
-    >>> import zc.async.adapters
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.defaultDataManagerAdapter)
-    >>> import zc.async.interfaces
-    >>> zc.async.interfaces.IDataManager(app) # doctest: +ELLIPSIS
-    <zc.async.datamanager.DataManager object at ...>
+-------
+Results
+-------
 
-Normally, each process discovers or creates its UUID, and starts an
-engine to do work.  The engine is a non-persistent object that
-participates in the Twisted main loop.  It discovers or creates the
-persistent worker object associated with the instance UUID in the
-datamanager's `workers` mapping, and starts polling.  This would have
-happened when the data manager was announced as available in the
-InstallerAndNotifier above.
+So here's a simple story.  What if you want to get a result back from a
+call?  Look at the job.result after the call is COMPLETED.
 
-    >>> from zope.component import eventtesting
-    >>> evs = eventtesting.getEvents(
-    ...     zc.async.interfaces.IDataManagerAvailableEvent)
-    >>> evs # doctest: +ELLIPSIS
-    [<zc.async.interfaces.DataManagerAvailable object at ...>]
-
-So now we would have had a subscriber that installed the worker in the
-data manager.  But right now there are no workers, just because we
-didn't want to talk about the next step yet.
-
-    >>> len(zc.async.interfaces.IDataManager(app).workers)
-    0
-
-Let's install the subscriber we need and refire the event.  Our worker
-will have a UUID created for it, and then it will be installed with the
-UUID as key.  We can't actually use the same event because it has an
-object from a different connection, so we'll recreate it.  We'll then use
-a magic `time_passes` function to simulate the Twisted reactor cycling and
-firing scheduled calls.  After we sync our connection with the database,
-the worker appears.  It is tied to the engineUUID of the current engine.
-
-    >>> zope.component.provideHandler(
-    ...     zc.async.subscribers.installTwistedEngine)
-    >>> zope.event.notify(
-    ...     zc.async.interfaces.DataManagerAvailable(
-    ...         root['zc.async.datamanager']))
-    >>> time_passes()
+    >>> def imaginaryNetworkCall():
+    ...     # let's imagine this makes a network call...
+    ...     return "200 OK"
+    ...
+    >>> job = queue.put(imaginaryNetworkCall)
+    >>> print job.result
+    None
+    >>> job.status == zc.async.interfaces.PENDING
     True
-    >>> t = transaction.begin() # sync
-    >>> len(zc.async.interfaces.IDataManager(app).workers)
-    1
-    >>> zc.async.interfaces.IDataManager(app).workers.values()[0]
-    ... # doctest: +ELLIPSIS
-    <zc.async.datamanager.Worker object at ...>
-    >>> (zc.async.interfaces.IDataManager(app).workers.values()[0].engineUUID
-    ...  is not None)
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> t = transaction.begin()
+    >>> job.result
+    '200 OK'
+    >>> job.status == zc.async.interfaces.COMPLETED
     True
 
-The instance UUID, in hex, is stored in INSTANCE_HOME/etc/uuid.txt
+--------
+Closures
+--------
 
-    >>> import uuid
-    >>> import os
-    >>> f = open(os.path.join(
-    ...     os.environ.get("INSTANCE_HOME"), 'etc', 'uuid.txt'))
-    >>> uuid_hex = f.readline().strip()
-    >>> f.close()
-    >>> uuid = uuid.UUID(uuid_hex)
-    >>> worker = zc.async.interfaces.IDataManager(app).workers[uuid]
-    >>> worker.UUID == uuid
-    True
+What's more, you can pass a Job to the `put` call.  This means that you
+aren't constrained to simple no-argument calls performed
+asynchronously: you can pass a job with a call, arguments, and
+keyword arguments--effectively, a kind of closure.  Here's a quick example. 
+We'll use the demo object, and its increase method, that we introduced
+above, but this time we'll include some arguments [#job]_.
 
-The file is intended to stay in the instance home as a persistent identifier
-of this particular worker.
+With positional arguments:
 
-Our worker has `thread` and `reactor` jobs, with all jobs available.  By
-default, a worker begins offering a single thread job and a four
-"simultaneous" reactor jobs.  This can be changed simply by changing the value
-on the worker and committing.
+    >>> t = transaction.begin()
+    >>> job = queue.put(
+    ...     zc.async.job.Job(root['demo'].increase, 5))
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> t = transaction.begin()
+    >>> root['demo'].counter
+    6
 
-    >>> worker.thread.size
-    1
-    >>> worker.reactor.size
-    4
-    >>> len(worker.thread)
-    0
-    >>> len(worker.reactor)
-    0
+With keyword arguments:
 
-But what are `thread` and `reactor` jobs?
+    >>> job = queue.put(
+    ...     zc.async.job.Job(root['demo'].increase, value=10))
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> t = transaction.begin()
+    >>> root['demo'].counter
+    16
 
-A `thread` job is one that is performed in a thread with a dedicated
-ZODB connection.  It's the simplest to use for typical tasks.
+Note that arguments to these jobs can be any persistable object.
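+
+For instance, persistent objects themselves can be arguments (a sketch;
+``transfer`` is a hypothetical module-global function and ``root['demo2']``
+a second ``Demo`` instance)::
+
+    def transfer(source, destination, amount):
+        source.counter -= amount
+        destination.counter += amount
+
+    job = queue.put(zc.async.job.Job(
+        transfer, root['demo'], root['demo2'], 5))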
 
-A thread job also may be overkill for some jobs that don't need a
-connection constantly.  It also is not friendly to Twisted services. 
+--------
+Failures
+--------
 
-A `reactor` job is performed in the main thread, in a call scheduled by
-the Twisted reactor.  It has some gotchas (see zc.twist's README), but it
-can be good for jobs that don't need a constant connection, and for jobs
-that can leverage Twisted code.
+What happens if a call raises an exception?  The return value is a Failure.
 
-We now have a simple set up: a data manager with a single worker.  Let's start
-making some asynchronous calls!
+    >>> def I_am_a_bad_bad_function():
+    ...     return foo + bar
+    ...
+    >>> job = queue.put(I_am_a_bad_bad_function)
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> t = transaction.begin()
+    >>> job.result
+    <zc.twist.Failure exceptions.NameError>
 
-Basic Usage: IManager.add
-=========================
+Failures can provide useful information such as tracebacks.
 
-The simplest case is simple to perform: pass a persistable callable to the
-`put` method of one of the manager's queues.  We'll make reactor calls first.
+    >>> print job.result.getTraceback()
+    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    exceptions.NameError: global name 'foo' is not defined
+    <BLANKLINE>
 
-    >>> from zc.async import interfaces
-    >>> dm = zc.async.interfaces.IDataManager(app)
-    >>> def send_message():
-    ...     print "imagine this sent a message to another machine"
-    >>> partial = dm.reactor.put(send_message)
-    >>> transaction.commit()
+---------
+Callbacks
+---------
 
-Now a few cycles need to pass in order to have the job performed.  We'll
-use a helper function called `time_flies` to simulate the asynchronous
-cycles necessary for the manager and workers to perform the task.
+You can register callbacks to handle the result of a job, whether a
+Failure or another result.
 
-    >>> dm.workers.values()[0].poll_seconds
-    5
-    >>> count = time_flies(5)
-    imagine this sent a message to another machine
+Note that, unlike callbacks on a Twisted deferred, these callbacks do not
+change the result of the original job. Since callbacks are jobs, you can chain
+results, but generally callbacks for the same job all get the same result as
+input.
 
-We also could have used the method of a persistent object.  Here's another
-quick example.
+Note too that, during execution of a callback, there is no guarantee that
+the callback will be processed on the same machine as the main call, and
+some of the ``local`` functions, discussed below, will not work as desired.
 
-    >>> import persistent
-    >>> class Demo(persistent.Persistent):
-    ...     counter = 0
-    ...     def increase(self, value=1):
-    ...         self.counter += value
+Here's a simple example of reacting to a success.
+
+    >>> def I_scribble_on_strings(string):
+    ...     return string + ": SCRIBBLED"
     ...
-    >>> app['demo'] = Demo()
+    >>> job = queue.put(imaginaryNetworkCall)
+    >>> callback = job.addCallback(I_scribble_on_strings)
     >>> transaction.commit()
-    >>> app['demo'].counter
-    0
-    >>> partial = dm.reactor.put(app['demo'].increase)
-    >>> transaction.commit()
-    >>> count = time_flies(5)
+    >>> wait_for(job)
+    >>> job.result
+    '200 OK'
+    >>> callback.result
+    '200 OK: SCRIBBLED'
 
-We need to sync our connection so that we get the changes in other
-connections: we can do that with a transaction begin, commit, or abort.
+Here's a more complex example of handling a Failure, and then chaining
+a subsequent callback.
 
-    >>> app['demo'].counter
-    0
-    >>> t = transaction.begin()
-    >>> app['demo'].counter
-    1
+    >>> def I_handle_NameErrors(failure):
+    ...     failure.trap(NameError) # see twisted.python.failure.Failure docs
+    ...     return 'I handled a name error'
+    ...
+    >>> job = queue.put(I_am_a_bad_bad_function)
+    >>> callback1 = job.addCallbacks(failure=I_handle_NameErrors)
+    >>> callback2 = callback1.addCallback(I_scribble_on_strings)
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> job.result
+    <zc.twist.Failure exceptions.NameError>
+    >>> callback1.result
+    'I handled a name error'
+    >>> callback2.result
+    'I handled a name error: SCRIBBLED'
 
-The method was called, and the persistent object modified!
+Advanced Techniques and Tools
+=============================
 
-You can also pass a timezone-aware datetime.datetime to schedule a
-call.  The safest thing to use is a UTC timezone.
+**Important**
 
-    >>> t = transaction.begin()
-    >>> import datetime
-    >>> import pytz
-    >>> datetime.datetime.now(pytz.UTC)
-    datetime.datetime(2006, 8, 10, 15, 44, 32, 211, tzinfo=<UTC>)
-    >>> partial = dm.reactor.put(
-    ...     send_message, datetime.datetime(
-    ...         2006, 8, 10, 15, 45, tzinfo=pytz.UTC))
-    >>> partial.begin_after
-    datetime.datetime(2006, 8, 10, 15, 45, tzinfo=<UTC>)
-    >>> transaction.commit()
-    >>> count = time_flies(10)
-    >>> count = time_flies(10)
-    >>> count = time_flies(5)
-    >>> count = time_flies(5)
-    imagine this sent a message to another machine
-    >>> datetime.datetime.now(pytz.UTC)
-    datetime.datetime(2006, 8, 10, 15, 45, 2, 211, tzinfo=<UTC>)
+The job and its functionality described above are the core zc.async tools.
 
-If you set a time that has already passed, it will be run as if it had
-been set to run immediately.
+The following are advanced techniques and tools of various complexities. You
+can use zc.async very productively without ever understanding or using them.
+If the following do not make sense to you now, feel free to skip ahead.
 
-    >>> t = transaction.begin()
-    >>> partial = dm.reactor.put(
-    ...     send_message, datetime.datetime(2006, 7, 21, 12, tzinfo=pytz.UTC))
-    >>> transaction.commit()
-    >>> count = time_flies(5)
-    imagine this sent a message to another machine
+--------------
+zc.async.local
+--------------
 
-The `put` method of the thread and reactor queues is the manager's
-entire application API.  Other methods are used to introspect, but are
-not needed for basic usage.
+Jobs always run their callables in a thread, within the context of a
+connection to the ZODB. The callables have access to five special
+thread-local functions if they need them.  These are
+available off of zc.async.local.
 
-But what is that result of the `put` call in the examples above?  A
-partial?  What do you do with that?
+``zc.async.local.getJob()``
+    The ``getJob`` function can be used to examine the job, to get
+    a connection off of ``_p_jar``, to get the queue into which the job
+    was put, or other uses.
 
-Partials
-========
+``zc.async.local.setLiveAnnotation(name, value, job=None)``
+    The ``setLiveAnnotation`` tells the agent to set an annotation on a job,
+    by default the current job, *in another connection*.  This makes it
+    possible to send messages about progress or for coordination while in the
+    middle of other work.
+    
+    As a simple rule, only send immutable objects like strings or
+    numbers as values [#setLiveAnnotation]_.
 
-The result of a call to `put` returns an IDataManagerPartial.  The
-partial represents the pending call.  This object has a lot of
-functionality that's explored in other documents in this package, and
-demostrated a bit below, but here's a summary.  
+``zc.async.local.getLiveAnnotation(name, default=None, timeout=0, poll=1, job=None)``
+    The ``getLiveAnnotation`` tells the agent to get an annotation for a job,
+    by default the current job, *from another connection*.  This makes it
+    possible to send messages about progress or for coordination while in the
+    middle of other work.  
+    
+    As a simple rule, only ask for annotation values that will be
+    immutable objects like strings or numbers [#getLiveAnnotation]_.
 
-- You can introspect it to look at, and even modify, the call and its
-  arguments.
+    If the ``timeout`` argument is set to a positive float or int, the function
+    will wait at least that number of seconds until an annotation of the
+    given name is available. Otherwise, it will return the ``default`` if the
+    name is not present in the annotations. The ``poll`` argument specifies
+    approximately how often to poll for the annotation, in seconds (to be more
+    precise, a subsequent poll will be min(poll, remaining seconds until
+    timeout) seconds away).
 
-- You can specify that the partial may or may not be run by given
-  workers (identifying them by their UUID).
+``zc.async.local.getReactor()``
+    The ``getReactor`` function returns the job's dispatcher's reactor.  The
+    ``getLiveAnnotation`` and ``setLiveAnnotation`` functions use this,
+    along with the zc.twist package, to work their magic; if you are feeling
+    adventurous, you can do the same.
 
-- You can specify other calls that should be made on the basis of the
-  result of this call.
+``zc.async.local.getDispatcher()``
+    The ``getDispatcher`` function returns the job's dispatcher.  This might
+    be used to analyze its non-persistent poll data structure, for instance
+    (described later in configuration discussions).
 
-- You can persist a reference to it, and periodically (after syncing
-  your connection with the database, which happens whenever you begin or
-  commit a transaction) check its `state` to see if it is equal to
-  zc.async.interfaces.COMPLETED.  When it is, the call has run to
-  completion, either to success or an exception.
+Let's give the first three a whirl.  We will write a function that
+examines the job's status while it is being called, records that status in
+an annotation, and then waits for our flag before finishing.
 
-- You can look at the result of the call (once COMPLETED).  It might be
-  the result you expect, or a twisted.python.failure.Failure, which is a
-  way to safely communicate exceptions across connections and machines
-  and processes.
+    >>> def annotateStatus():
+    ...     zc.async.local.setLiveAnnotation(
+    ...         'zc.async.test.status',
+    ...         zc.async.local.getJob().status)
+    ...     zc.async.local.getLiveAnnotation(
+    ...         'zc.async.test.flag', timeout=5)
+    ...     return 42
+    ...
+    >>> job = queue.put(annotateStatus)
+    >>> transaction.commit()
+    >>> def wait_for_annotation(job, key):
+    ...     reactor.time_flies(dispatcher.poll_interval) # starts thread
+    ...     for i in range(10):
+    ...         while reactor.time_passes():
+    ...             pass
+    ...         transaction.begin()
+    ...         if key in job.annotations:
+    ...             break
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         print 'Timed out' + repr(dict(job.annotations))
+    ...
+    >>> wait_for_annotation(job, 'zc.async.test.status')
+    >>> job.annotations['zc.async.test.status'] == (
+    ...     zc.async.interfaces.ACTIVE)
+    True
+    >>> job.status == zc.async.interfaces.ACTIVE
+    True
 
-What's more, you can pass a Partial to the `put` call.  This means that
-you aren't constrained to simply having simple non-argument calls
-performed asynchronously, but you can pass a partial with a call,
-arguments, and keyword arguments.  Here's a quick example.  We'll use
-the same demo object, and its increase method, as our example above, but
-this time we'll include some arguments [#partial]_.
+[#stats_1]_
 
-    >>> t = transaction.begin()
-    >>> partial = dm.reactor.put(
-    ...     zc.async.partial.Partial(app['demo'].increase, 5))
+    >>> job.annotations['zc.async.test.flag'] = True
     >>> transaction.commit()
-    >>> count = time_flies(5)
-    >>> t = transaction.begin()
-    >>> app['demo'].counter
-    6
-    >>> partial = dm.reactor.put(
-    ...     zc.async.partial.Partial(app['demo'].increase, value=10))
-    >>> transaction.commit()
-    >>> count = time_flies(5)
-    >>> t = transaction.begin()
-    >>> app['demo'].counter
-    16
+    >>> wait_for(job)
+    >>> job.result
+    42
 
-Thread Calls And Reactor Calls
-==============================
+[#stats_2]_ ``getReactor`` and ``getDispatcher`` are for advanced use
+cases and are not explored further here.
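+
+As a minimal illustration only, a job callable might do something like
+the following with them (a sketch; the attribute names shown are
+assumptions, not a documented API)::
+
+    def introspective_job():
+        dispatcher = zc.async.local.getDispatcher()
+        reactor = zc.async.local.getReactor()
+        # e.g., report which process performed the work
+        return str(dispatcher.UUID)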
 
-...
+----------
+Job Quotas
+----------
 
-Optimized Usage
-===============
+One class of asynchronous jobs is ideally serialized.  For instance,
+you may want to reduce or eliminate the chance of conflict errors when
+updating a text index.  One way to do this kind of serialization is to
+use the ``quota_names`` attribute of the job.
 
-Writing a task that doesn't need a ZODB connection
---------------------------------------------------
+For example, let's first show two non-serialized jobs running at the
+same time, and then two serialized jobs created at the same time.
+The first part of the example does not use quota_names, to show a contrast.
 
-...Twisted reactor tasks are best for this...
+For our parallel jobs, we'll do something that would create a deadlock
+if they were serial.  Notice that we are mutating the job arguments after
+creation to accomplish this, which is supported.
 
-...also could have different IPartial implementation sets self as
-ACTIVE, commits and closes connection, calls f with args, and when
-result returns, gets connection again and sets value on it, changes
-state, and performs callbacks, sets state...
+    >>> def waitForParallel(other):
+    ...     zc.async.local.setLiveAnnotation(
+    ...         'zc.async.test.flag', True)
+    ...     zc.async.local.getLiveAnnotation(
+    ...         'zc.async.test.flag', job=other, timeout=0.4, poll=0)
+    ...
+    >>> job1 = queue.put(waitForParallel)
+    >>> job2 = queue.put(waitForParallel)
+    >>> job1.args.append(job2)
+    >>> job2.args.append(job1)
+    >>> transaction.commit()
+    >>> wait_for(job1, job2)
+    >>> job1.status == zc.async.interfaces.COMPLETED
+    True
+    >>> job2.status == zc.async.interfaces.COMPLETED
+    True
+    >>> job1.result is job2.result is None
+    True
 
-Multiple ZEO workers
---------------------
+On the other hand, for our serial jobs, we'll do something that would fail
+if it were parallel.  We'll rely on ``quota_names``.  
 
-...
+Quotas verge on configuration, which is not what this section is about,
+because they must be configured on the queue.  However, they also affect
+usage, so we show them here.
 
-Catching and Fixing Errors
-==========================
+    >>> def pause(other):
+    ...     zc.async.local.setLiveAnnotation(
+    ...         'zc.async.test.flag', True)
+    ...     res = zc.async.local.getLiveAnnotation(
+    ...         'zc.async.test.flag', timeout=0.4, poll=0.1, job=other)
+    ...
+    >>> job1 = queue.put(pause)
+    >>> job2 = queue.put(imaginaryNetworkCall)
 
-...call installed during InstallTwistedWorker to check on worker...
+You can't put a name in ``quota_names`` unless the quota has been created
+in the queue.
 
-...worker finds another process already installed with same UUID; could be
-shutdown error (ghost of self) or really another process...show engineUUID...
-some discussion already in datamanager.txt...
+    >>> job1.quota_names = ('test',)
+    Traceback (most recent call last):
+    ...
+    ValueError: ('unknown quota name', 'test')
+    >>> queue.quotas.create('test')
+    >>> job1.quota_names = ('test',)
+    >>> job2.quota_names = ('test',)
 
-Gotchas
-=======
+Now we can see the two jobs being performed serially.
 
-...some callbacks may still be working when partial is completed.  Therefore
-partial put in `completed` for worker so that it can have a chance to run to
-completion (in addition to other goals, like being able to look at 
+    >>> job1.args.append(job2)
+    >>> transaction.commit()
+    >>> reactor.time_flies(dispatcher.poll_interval)
+    1
+    >>> for i in range(10):
+    ...     t = transaction.begin()
+    ...     if job1.status == zc.async.interfaces.ACTIVE:
+    ...         break
+    ...     time.sleep(0.1)
+    ... else:
+    ...     print 'TIME OUT'
+    ...
+    >>> job2.status == zc.async.interfaces.PENDING
+    True
+    >>> job2.annotations['zc.async.test.flag'] = False
+    >>> transaction.commit()
+    >>> wait_for(job1)
+    >>> wait_for(job2)
+    >>> print job1.result
+    None
+    >>> print job2.result
+    200 OK
 
-Patterns
-========
+Quotas can be configured for limits greater than one at a time, if desired.
+This may be valuable when a needed resource is only available in limited
+numbers at a time.
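+
+A sketch of such a configuration, assuming a ``size`` argument to
+``create`` (see the queue documentation for the precise spelling)::
+
+    queue.quotas.create('limited_resource', size=4)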
 
-Partials That Need a Certain Environment
-----------------------------------------
+Note that, while quotas are valuable tools for doing serialized work such as
+updating a text index, other optimization features sometimes useful for this
+sort of task, such as collapsing similar jobs, are not provided directly by
+this package. This functionality could be trivially built on top of zc.async,
+however [#idea_for_collapsing_jobs]_.
 
-...Partial that needs a certain environment: wrap partial in partial.  Outer
-partial is responsible for checking if environment is good; if so, run inner
-partial, and if not, create a new outer partial, copy over our excluded worker
-UUIDs, add this worker UUID, set perform_after to adjusted value,
-and schedule it...
+--------------
+Returning Jobs
+--------------
 
-Callbacks That Want to be Performed by a Worker
------------------------------------------------
+Our examples so far have done work directly.  What if the job wants to
+orchestrate other work?  One way this can be done is to return another
+job.  The result of the inner job will be the result of the first
+job once the inner job is finished.  This approach can be used to
+break up the work of long-running processes; to be more cooperative with
+other jobs; and to make parallelizable parts of a job available
+to more workers.
 
-Callbacks are called immediately, whether they be within the call to the
-partial, or within the `addCallbacks` call.  If you want the job to be done
-asynchronously, make the callback with a partial.  The partial will get
-a reference to the data_manager used by the main partial.  It can create a
-partial, assign it to one of the data manager queues, and return the partial.
-Consider the following (we use a `resolve` function to let all of the pending
-calls resolve before the example proceeds [#resolve]_).
+Serialized Work
+---------------
 
-    >>> def multiply(*args):
-    ...     res = 1
-    ...     for a in args:
-    ...         res *= a
-    ...     return res
+First, consider a serialized example.  This simple pattern is one approach.
+
+    >>> def second_job(value):
+    ...     # imagine a lot of work goes on...
+    ...     return value * 2
     ...
-    >>> def doCallbackWithPartial(partial, res):
-    ...     p = zc.async.partial.Partial(multiply, 2, res)
-    ...     zc.async.interfaces.IDataManager(partial).thread.put(p)
-    ...     return p
+    >>> def first_job():
+    ...     # imagine a lot of work goes on...
+    ...     intermediate_value = 21
+    ...     queue = zc.async.local.getJob().queue
+    ...     return queue.put(zc.async.job.Job(
+    ...         second_job, intermediate_value))
     ...
-    >>> p = dm.thread.put(zc.async.partial.Partial(multiply, 3, 4))
-    >>> p_callback = p.addCallbacks(
-    ...     zc.async.partial.Partial.bind(doCallbackWithPartial))
+    >>> job = queue.put(first_job)
     >>> transaction.commit()
-    >>> resolve(p_callback)
-    >>> p.result
-    12
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-    >>> p_callback.state == zc.async.interfaces.COMPLETED
-    True
-    >>> p_callback.result
-    24
+    >>> wait_for(job, attempts=3)
+    TIME OUT
+    >>> wait_for(job, attempts=3)
+    >>> job.result
+    42
 
-Progress Reports
-----------------
+The second_job could also have returned a job, allowing for additional
+legs.  Once the last job returns a real result, it will cascade through the
+past jobs back up to the original one.
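+
+To illustrate, a hedged sketch of adding a third leg (``third_job`` is a
+hypothetical callable)::
+
+    def second_job(value):
+        # do part of the work, then delegate the rest...
+        queue = zc.async.local.getJob().queue
+        return queue.put(zc.async.job.Job(third_job, value * 2))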
 
-Using zc.twist.Partial, or by managing your own connections
-otherwise, you can send messages back during a long-running connection. 
-For instance, imagine you wanted to annotate a partial with progress
-messages, while not actually committing the main work.
+A different approach could have used callbacks.  Using callbacks can be
+somewhat more complicated to follow, but can allow for a cleaner
+separation of code: dividing code that does work from code that
+orchestrates the jobs.  We'll see an example of the idea below.
 
-Here's an example of one way of getting this to work.  We can use the partial's
-annotations, which are not touched by the partial code and are a separate
-persistent object, so can be changed concurrently without conflict errors.
+Parallelized Work
+-----------------
 
-We'll run the partial within a threaded worker. The callable could use
-twisted.internet.reactor.callFromThread to get the change to be made. 
-Parts of the twist.Partial machinery expect to be called in the main
-thread, where the twisted reactor is running.
+Now how can we set up parallel jobs?  There are other good ways, but we
+can describe one way that avoids potential problems with the
+current-as-of-this-writing (ZODB 3.8 and trunk) default optimistic MVCC
+serialization behavior in the ZODB.  The solution uses callbacks, which
+also allows us to cleanly divide the "work" code from the synchronization
+code, as described in the previous paragraph.
 
-    >>> import twisted.internet.reactor
-    >>> def setAnnotation(partial, annotation_key, value):
-    ...     partial.annotations[annotation_key] = value
+First, we'll define the jobs that do work.  ``job_A``, ``job_B``, and
+``job_C`` will be jobs that can be done in parallel, and
+``post_process`` will be a function that assembles the job results for a
+final result.
+
+    >>> def job_A():
+    ...     # imaginary work...
+    ...     return 7
     ...
-    >>> import threading
-    >>> import sys
-    >>> thread_lock = threading.Lock()
-    >>> main_lock = threading.Lock()
-    >>> acquired = thread_lock.acquire()
-    >>> acquired = main_lock.acquire()
-    >>> def callWithProgressReport(partial):
-    ...     print "do some work"
-    ...     print "more work"
-    ...     print "about half done"
-    ...     twisted.internet.reactor.callFromThread(zc.twist.Partial(
-    ...         setAnnotation, partial, 'zc.async.partial_txt.half_done', True))
-    ...     main_lock.release()
-    ...     acquired = thread_lock.acquire()
-    ...     return 42
+    >>> def job_B():
+    ...     # imaginary work...
+    ...     return 14
     ...
-    >>> p = dm.thread.put(zc.async.partial.Partial.bind(callWithProgressReport))
+    >>> def job_C():
+    ...     # imaginary work...
+    ...     return 21
+    ...
+    >>> def post_process(*args):
+    ...     # this callable represents one that needs to wait for the
+    ...     # parallel jobs to be done before it can process them and return
+    ...     # the final result
+    ...     return sum(args)
+    ...
+
+Now this code orchestrates the jobs to get everything done.  Note that, in
+the callback function, mutating the same object that we then check
+(job.args) is how we enforce the necessary serializability
+with MVCC turned on.
+
+    >>> def callback(job, result):
+    ...     job.args.append(result)
+    ...     if len(job.args) == 3: # all results are in
+    ...         zc.async.local.getJob().queue.put(job)
+    ...
+    >>> def main_job():
+    ...     job = zc.async.job.Job(post_process)
+    ...     queue = zc.async.local.getJob().queue
+    ...     for j in (job_A, job_B, job_C):
+    ...         queue.put(j).addCallback(
+    ...             zc.async.job.Job(callback, job))
+    ...     return job
+    ...
+
+That may be a bit mind-blowing at first.  The trick to catch here is that,
+because the main_job returns a job, the result of that job will become the
+result of the main_job once the returned (``post_process``) job is done.
+
+Now we'll put this in and let it cook.
+
+    >>> job = queue.put(main_job)
     >>> transaction.commit()
-    >>> ignore = time_flies(5); acquired = main_lock.acquire()
-    ... # get the reactor to kick for main call; then wait for lock release.
-    do some work
-    more work
-    about half done
-    >>> ignore = time_flies(5) # get the reactor to kick for progress report
-    >>> t = transaction.begin() # sync
-    >>> p.annotations.get('zc.async.partial_txt.half_done')
-    True
-    >>> p.state == zc.async.interfaces.ACTIVE
-    True
-    >>> thread_lock.release()
-    >>> resolve(p)
-    >>> p.result
+    >>> wait_for(job, attempts=3)
+    TIME OUT
+    >>> wait_for(job, attempts=3)
+    TIME OUT
+    >>> wait_for(job, attempts=3)
+    TIME OUT
+    >>> wait_for(job, attempts=3)
+    >>> job.result
     42
-    >>> thread_lock.release()
-    >>> main_lock.release()
 
-Expiration
-----------
+Ta-da!
 
-If you want your call to expire after a certain amount of time, keep
-track of time yourself, and return a failure if you go over.  The
-partial does not offer special support for this use case.
+For real-world usage, you'd also probably want to deal with the possibility of
+one or more of the jobs generating a Failure, among other edge cases.
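+
+As a hedged sketch only, one way to make the ``callback`` above defensive is
+to check for a failure before aggregating (``zc.twist.Failure`` is the
+failure class that appears elsewhere in this document)::
+
+    import zc.twist
+
+    def callback(job, result):
+        if isinstance(result, zc.twist.Failure):
+            result = 0 # or record the failure, retry, etc., as desired
+        job.args.append(result)
+        if len(job.args) == 3: # all results are in
+            zc.async.local.getJob().queue.put(job)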
 
-Stopping the Engine
 -------------------
+Returning Deferreds
+-------------------
 
-The subscriber that sets up the async engine within the Twisted reactor also
-sets up a tearDown trigger.  We can look in our faux reactor and get it.
+What if you want to do work that doesn't require a ZODB connection?  You
+can also return a Twisted deferred (twisted.internet.defer.Deferred).
+When you then ``callback`` the deferred with the eventual result, the
+agent will be responsible for setting that value as the result of the
+original job and calling its callbacks.  This can be a useful trick for
+making network calls using Twisted or zc.ngi, for instance.
 
-    >>> len(faux.triggers)
-    1
-    >>> len(faux.triggers[0])
-    3
-    >>> faux.triggers[0][:2]
-    ('before', 'shutdown')
-    >>> dm.workers.values()[0].engineUUID is not None
-    True
-    >>> d = faux.triggers[0][2]()
-    >>> t = transaction.begin()
-    >>> dm.workers.values()[0].engineUUID is None
-    True
+    >>> def imaginaryNetworkCall2(deferred):
+    ...     # make a network call...
+    ...     deferred.callback('200 OK')
+    ...
+    >>> import twisted.internet.defer
+    >>> import threading
+    >>> def delegator():
+    ...     deferred = twisted.internet.defer.Deferred()
+    ...     t = threading.Thread(
+    ...         target=imaginaryNetworkCall2, args=(deferred,))
+    ...     t.run() # runs synchronously here; real code would use t.start()
+    ...     return deferred
+    ...
+    >>> job = queue.put(delegator)
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> job.result
+    '200 OK'
 
-[#tear_down]_
+Conclusion
+==========
 
-=========
-Footnotes
-=========
+This concludes our discussion of zc.async usage. The `next section`_ shows how
+to configure zc.async without Zope 3 [#stop_usage_reactor]_.
 
-.. [#uuid] UUIDs are generated by http://zesty.ca/python/uuid.html, as
-    incorporated in Python 2.5.  They are expected to be found in 
-    os.path.join(os.environ.get("INSTANCE_HOME"), 'etc', 'uuid.txt');
-    this file will be created and populated with a new UUID if it does
-    not exist.
+.. _next section: `Configuration without Zope 3`_
 
-.. [#subscribers] The zc.async.subscribers module provides two different
-    subscribers to set up a datamanager.  One subscriber expects to put
-    the object in the same database as the main application
-    (`zc.async.subscribers.basicInstallerAndNotifier`).  This is the
-    default, and should probably be used if you are a casual user.
-    
-    The other subscriber expects to put the object in a secondary
-    database, with a reference to it in the main database
-    (`zc.async.subscribers.installerAndNotifier`).  This approach keeps
-    the database churn generated by zc.async, which can be significant,
-    separate from your main data.  However, it also requires that you
-    set up two databases in your zope.conf (or equivalent, if this is
-    used outside of Zope 3).  And possibly even more onerously, it means
-    that persistent objects used for calls must either already be
-    committed, or be explicitly added to a connection; otherwise you
-    will get an InvalidObjectReference (see
-    cross-database-references.txt in the ZODB package).  The possible
-    annoyances may be worth it to someone building a more demanding
-    application.
-    
-    Again, the first subscriber is the easier to use, and is the default.
-    You can use either one (or your own).
+.. ......... ..
+.. Footnotes ..
+.. ......... ..
 
-    If you do want to use the second subscriber, here's a start on what
-    you might need to do in your zope.conf.  In a Zope without ZEO you
-    would set something like this up.
+.. [#history] The first generation, zasync, had the following goals:
 
-    <zodb>
-      <filestorage>
-        path $DATADIR/Data.fs
-      </filestorage>
-    </zodb>
-    <zodb zc.async>
-      <filestorage>
-        path $DATADIR/zc.async.fs
-      </filestorage>
-    </zodb>
+    - be scalable, so that another process or machine could do the
+      asynchronous work;
 
-    For ZEO, you could have the two databases on one server...
+    - support lengthy jobs outside of the ZODB;
+
+    - support lengthy jobs inside the ZODB;
+
+    - be recoverable, so that crashes would not lose work;
+
+    - be discoverable, so that logs and web interfaces give a view into
+      the work being done asynchronously;
+
+    - be easily extendible, to do new jobs; and
+
+    - support graceful job expiration and cancellation.
+
+    It met its goals well in some areas and adequately in others.
+
+    Based on experience with the first generation, this second
+    generation identifies several areas of improvement from the first
+    design, and adds several goals.
+
+    - Improvements
+
+      * More carefully delineate the roles of the comprising components.
+
+        The zasync design has three main components, as divided by their
+        roles: persistent deferreds, now called jobs; job queues (the
+        original zasync's "asynchronous call manager"); and dispatchers
+        (the original zasync ZEO client).  The zasync 1.x design
+        blurred the lines between the three components such that the
+        component parts could only be replaced with difficulty, if at
+        all. A goal for the 2.x design is to clearly define the role for
+        each of the three components such that, for instance, a user of a
+        queue does not need to know about the dispatcher or the agents.
+
+      * Improve scalability of asynchronous workers.
+
+        The 1.x line was initially designed for a single asynchronous
+        worker, which could be put on another machine thanks to ZEO. 
+        Tarek Ziadé of Nuxeo wrote zasyncdispatcher, which allowed
+        multiple asynchronous workers to accept work, allowing multiple
+        processes and multiple machines to divide and conquer. It worked
+        around the limitations of the original zasync design to provide
+        even more scalability. However, it was forced to divide up work
+        well before a given worker looked at the queue.
+
+        While dividing work earlier allows guesses and heuristics a
+        chance to predict what worker might be more free in the future,
+        a more reliable approach is to let the worker gauge whether it
+        should take a job at the time the job is taken. Perhaps the
+        worker will choose based on the worker's load, or other
+        concurrent jobs in the process, or other details. A goal for the
+        2.x line is to more directly support this type of scalability.
+
+      * Improve scalability of registering jobs.
+
+        The 1.x line initially wasn't concerned about very many
+        concurrent asynchronous requests.  When this situation was
+        encountered, it caused ConflictErrors between the worker process
+        reading the deferred queue and the code that was adding the
+        deferreds.  Thanks to Nuxeo, this problem was addressed in the
+        1.x line.  A goal for the new version is to include and improve
+        upon the 1.x solution.
+
+      * Make it even simpler to provide new jobs.
+
+        In the first version, `plugins` performed jobs.  They had a
+        specific API and they had to be configured.  A goal for the new
+        version is to require no specific API for jobs, and to not
+        require any configuration.
+
+      * Improve report information, especially through the web.
+
+        The component that the first version of zasync provided to do
+        the asynchronous work, the zasync client, provided very verbose
+        logs of the jobs done, but they were hard to read and also did
+        not have a through-the-web parallel.  Two goals for the new
+        version are to improve the usefulness of the filesystem logs and
+        to include more complete through-the-web visibility of the
+        status of the provided asynchronous clients.
+
+      * Make it easier to configure and start, especially for small
+        deployments.
+
+        A significant barrier to experimentation and deployment of the
+        1.x line was the difficulty in configuration.  The 1.x line
+        relied on ZConfig for zasync client configuration, demanding
+        non-extensible similar-yet-subtly-different .conf files like the
+        Zope conf files. The 2.x line plans to provide code that Zope 3
+        can configure to run in the same process as a standard Zope 3
+        application.  This means that development instances can start a
+        zasync quickly and easily.  It also means that processes can be
+        reallocated on the fly during production use, so that a machine
+        being used as a zasync process can quickly be converted to a web
+        server, if needed, and vice versa.  It further means that the
+        Zope web server can be used for through-the-web reports of the
+        current zasync process state.
+
+    - New goals
+
+      * Support intermediate return calls so that jobs can report back
+        how they are doing.
+
+        A frequent request from users of zasync 1.x was the ability for
+        a long-running asynchronous process to report back progress to
+        the original requester.  The 2.x line addresses this with three
+        changes:
+
+        + jobs are annotatable;
+
+        + jobs should not be modified in an asynchronous
+          worker that does work (though they may be read);
+
+        + jobs can request another job in a synchronous process
+          that annotates the job with progress status or other
+          information.
+
+        Because of relatively recent changes in the ZODB--multiversion
+        concurrency control (MVCC)--this simple pattern should not
+        generate conflict errors.
+
+      * Support time-delayed calls.
+
+        Retries and other use cases make time-delayed deferred calls
+        desirable. The new design supports these sorts of calls.
+
+.. [#identifying_agent] The combination of a queue name plus a
+    dispatcher UUID plus an agent name uniquely identifies an agent.
+
+.. [#usageSetUp] We set up the configuration for our usage examples here.
+
+    You must have two adapter registrations: IConnection to
+    ITransactionManager, and IPersistent to IConnection.  We will also
+    register IPersistent to ITransactionManager because the adapter is
+    designed for it.
+
+    We also need to be able to adapt functions and methods to jobs; the
+    ``zc.async.job.Job`` class is the expected implementation.
+
+    The dispatcher will look for a UUID utility, so we also need one of these.
     
-    <filestorage 1>
-      path Data.fs
-    </filestorage>
-    <filestorage 2>
-      path zc.async.fs
-    </filestorage>
-    
-    ...and then set up ZEO clients something like this.
-    
-    <zodb>
-      <zeoclient>
-        server localhost:8100
-        storage 1
-        # ZEO client cache, in bytes
-        cache-size 20MB
-      </zeoclient>
-    </zodb>
-    <zodb zc.async>
-      <zeoclient>
-        server localhost:8100
-        storage 2
-        # ZEO client cache, in bytes
-        cache-size 20MB
-      </zeoclient>
-    </zodb>
+    The ``zc.async.configure.base`` function performs all of these
+    registrations. If you are working with zc.async without ZCML you might want
+    to use it or ``zc.async.configure.minimal`` as a convenience.
 
-.. [#setup] This is a bit more than standard set-up code for a ZODB test,
-    because it sets up a multi-database.
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
 
-    >>> from zc.queue.tests import ConflictResolvingMappingStorage
-    >>> from ZODB import DB
-    >>> class Factory(object):
-    ...     def __init__(self, name):
-    ...         self.name = name
-    ...     def open(self):
-    ...         return DB(ConflictResolvingMappingStorage('test'))
-    ...
-    >>> import zope.app.appsetup.appsetup
-    >>> db = zope.app.appsetup.appsetup.multi_database(
-    ...     (Factory('main'), Factory('zc.async')))[0][0]
+    Now we'll set up the database, and make some policy decisions.  As
+    the subsequent ``configuration`` sections discuss, some helpers are
+    available for you to set this stuff up if you'd like, though it's not too
+    onerous to do it by hand.
+
+    We'll use a test reactor that we can control.
+
+    >>> import zc.async.testing
+    >>> reactor = zc.async.testing.Reactor()
+    >>> reactor.start() # this monkeypatches datetime.datetime.now
+
+    We need to instantiate the dispatcher with a reactor and a DB.  We
+    have the reactor, so here is the DB.  We use a FileStorage rather
+    than a MappingStorage variant typical in tests and examples because
+    we want MVCC.
+
+    >>> import ZODB.FileStorage
+    >>> storage = ZODB.FileStorage.FileStorage(
+    ...     'zc_async.fs', create=True)
+    >>> from ZODB.DB import DB 
+    >>> db = DB(storage) 
     >>> conn = db.open()
     >>> root = conn.root()
-    >>> import zope.app.folder # import rootFolder
-    >>> app = root['Application'] = zope.app.folder.rootFolder()
+
+    Now let's create the mapping of queues, and a single queue.
+
+    >>> import zc.async.queue
+    >>> import zc.async.interfaces
+    >>> mapping = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
+    >>> queue = mapping[''] = zc.async.queue.Queue()
     >>> import transaction
     >>> transaction.commit()
 
-    You must have two adapter registrations: IConnection to
-    ITransactionManager, and IPersistent to IConnection.  We will also
-    register IPersistent to ITransactionManager because the adapter is
-    designed for it.
+    Now we can instantiate, activate, and perform some reactor work in order
+    to let the dispatcher register with the queue.
 
-    >>> from zc.twist import transactionManager, connection
-    >>> import zope.component
-    >>> zope.component.provideAdapter(transactionManager)
-    >>> zope.component.provideAdapter(connection)
-    >>> import ZODB.interfaces
-    >>> zope.component.provideAdapter(
-    ...     transactionManager, adapts=(ZODB.interfaces.IConnection,))
+    >>> import zc.async.dispatcher
+    >>> dispatcher = zc.async.dispatcher.Dispatcher(db, reactor)
+    >>> dispatcher.activate()
+    >>> reactor.time_flies(1)
+    1
 
-    We need to be able to get data manager partials for functions and methods;
-    normal partials for functions and methods; and a data manager for a partial.
-    Here are the necessary registrations.
+    The UUID is set on the dispatcher.
 
     >>> import zope.component
-    >>> import types
     >>> import zc.async.interfaces
-    >>> import zc.async.partial
-    >>> import zc.async.adapters
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.method_to_datamanagerpartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.function_to_datamanagerpartial)
-    >>> zope.component.provideAdapter( # partial -> datamanagerpartial
-    ...     zc.async.adapters.DataManagerPartial,
-    ...     provides=zc.async.interfaces.IDataManagerPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.partial_to_datamanager)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.FunctionType,),
-    ...     provides=zc.async.interfaces.IPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.MethodType,),
-    ...     provides=zc.async.interfaces.IPartial)
-    ...
-    
-    A monkeypatch, removed in another footnote below.
+    >>> UUID = zope.component.getUtility(zc.async.interfaces.IUUID)
+    >>> dispatcher.UUID == UUID
+    True
 
-    >>> import datetime
-    >>> import pytz
-    >>> old_datetime = datetime.datetime
-    >>> def set_now(dt):
-    ...     global _now
-    ...     _now = _datetime(*dt.__reduce__()[1])
-    ...
-    >>> class _datetime(old_datetime):
-    ...     @classmethod
-    ...     def now(klass, tzinfo=None):
-    ...         if tzinfo is None:
-    ...             return _now.replace(tzinfo=None)
+    Here's an agent named 'main'.
+
+    >>> import zc.async.agent
+    >>> agent = zc.async.agent.Agent()
+    >>> queue.dispatchers[dispatcher.UUID]['main'] = agent
+    >>> agent.chooser is zc.async.agent.chooseFirst
+    True
+    >>> agent.size
+    3
+    >>> transaction.commit()
+
+.. [#wait_for] This is our helper function.  It relies on the test fixtures
+    set up in the previous footnote.
+
+    >>> import time
+    >>> def wait_for(*jobs, **kwargs):
+    ...     reactor.time_flies(dispatcher.poll_interval) # starts thread
+    ...     # now we wait for the thread
+    ...     for i in range(kwargs.get('attempts', 10)):
+    ...         while reactor.time_passes():
+    ...             pass
+    ...         transaction.begin()
+    ...         for j in jobs:
+    ...             if j.status != zc.async.interfaces.COMPLETED:
+    ...                 break
     ...         else:
-    ...             return _now.astimezone(tzinfo)
-    ...     def astimezone(self, tzinfo):
-    ...         return _datetime(
-    ...             *super(_datetime,self).astimezone(tzinfo).__reduce__()[1])
-    ...     def replace(self, *args, **kwargs):
-    ...         return _datetime(
-    ...             *super(_datetime,self).replace(
-    ...                 *args, **kwargs).__reduce__()[1])
-    ...     def __repr__(self):
-    ...         raw = super(_datetime, self).__repr__()
-    ...         return "datetime.datetime%s" % (
-    ...             raw[raw.index('('):],)
-    ...     def __reduce__(self):
-    ...         return (argh, super(_datetime, self).__reduce__()[1])
-    >>> def argh(*args, **kwargs):
-    ...     return _datetime(*args, **kwargs)
+    ...             break
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         print 'TIME OUT'
     ...
-    >>> datetime.datetime = _datetime
-    >>> _start = _now = datetime.datetime(
-    ...     2006, 8, 10, 15, 44, 22, 211, pytz.UTC)
 
-    We monkeypatch twisted.internet.reactor (and replace it below).  
+.. [#commit_for_multidatabase] We commit before we do the next step as a
+    good practice, in case the queue is from a different database than
+    the root.  See the configuration sections for a discussion about
+    why putting the queue in another database might be a good idea. 
+    
+    Rather than committing the transaction,
+    ``root._p_jar.add(root['demo'])`` would also accomplish the same
+    thing from a multi-database perspective, without a commit.  It was
+    not used in the example because the author judged the
+    ``transaction.commit()`` to be less jarring to the reader.  If you
+    are down here reading this footnote, maybe the author was wrong. :-)
 
-    >>> import twisted.internet.reactor
-    >>> import threading
-    >>> import bisect
-    >>> class FauxReactor(object):
-    ...     def __init__(self):
-    ...         self.time = 0
-    ...         self.calls = []
-    ...         self.triggers = []
-    ...         self._lock = threading.Lock()
-    ...     def callLater(self, delay, callable, *args, **kw):
-    ...         res = (delay + self.time, callable, args, kw)
-    ...         self._lock.acquire()
-    ...         bisect.insort(self.calls, res)
-    ...         self._lock.release()
-    ...         # normally we're supposed to return something but not needed
-    ...     def callFromThread(self, callable, *args, **kw):
-    ...         self._lock.acquire()
-    ...         bisect.insort(
-    ...             self.calls,
-    ...             (self.time, callable, args, kw))
-    ...         self._lock.release()
-    ...     def addSystemEventTrigger(self, *args):
-    ...         self.triggers.append(args) # 'before', 'shutdown', callable
-    ...     def _get_next(self, end):
-    ...         self._lock.acquire()
-    ...         try:
-    ...             if self.calls and self.calls[0][0] <= end:
-    ...                 return self.calls.pop(0)
-    ...         finally:
-    ...             self._lock.release()
-    ...     def time_flies(self, time):
-    ...         global _now
-    ...         end = self.time + time
-    ...         ct = 0
-    ...         next = self._get_next(end)
-    ...         while next is not None:
-    ...             self.time, callable, args, kw = next
-    ...             _now = _datetime(
-    ...                 *(_start + datetime.timedelta(
-    ...                     seconds=self.time)).__reduce__()[1])
-    ...             callable(*args, **kw) # normally this would get try...except
-    ...             ct += 1
-    ...             next = self._get_next(end)
-    ...         self.time = end
-    ...         return ct
-    ...     def time_passes(self):
-    ...         next = self._get_next(self.time)
-    ...         if next is not None:
-    ...             self.time, callable, args, kw = next
-    ...             callable(*args, **kw)
-    ...             return True
-    ...         return False
-    ...
-    >>> faux = FauxReactor()
-    >>> oldCallLater = twisted.internet.reactor.callLater
-    >>> oldCallFromThread = twisted.internet.reactor.callFromThread
-    >>> oldAddSystemEventTrigger = (
-    ...     twisted.internet.reactor.addSystemEventTrigger)
-    >>> twisted.internet.reactor.callLater = faux.callLater
-    >>> twisted.internet.reactor.callFromThread = faux.callFromThread
-    >>> twisted.internet.reactor.addSystemEventTrigger = (
-    ...     faux.addSystemEventTrigger)
-    >>> time_flies = faux.time_flies
-    >>> time_passes = faux.time_passes
+.. [#already_passed]
 
-.. [#handlers] In the second footnote above, the text describes two
-    available subscribers.  When this documentation is run as a test, it
-    is run twice, once with each.  To accomodate this, in our example
-    below we appear to pull the "installerAndNotifier" out of the air:
-    it is installed as a global when the test is run.
+    >>> t = transaction.begin()
+    >>> job = queue.put(
+    ...     send_message, datetime.datetime(2006, 8, 10, 15, tzinfo=pytz.UTC))
+    >>> transaction.commit()
+    >>> wait_for(job)
+    imagine this sent a message to another machine
+    
+    It's worth noting that this situation constitutes a small exception
+    in the handling of scheduled calls.  Scheduled calls usually get
+    preference when jobs are handed out over normal non-scheduled "as soon as
+    possible" jobs.  However, setting the begin_after date to an earlier
+    time puts the job at the end of the (usually) FIFO queue of non-scheduled
+    tasks: it is treated exactly as if the date had not been specified.
 
-.. [#partial] The Partial class can take arguments and keyword arguments
+.. [#already_passed_timed_out]
+
+    >>> t = transaction.begin()
+    >>> job = queue.put(
+    ...     send_message, datetime.datetime(2006, 7, 21, 12, tzinfo=pytz.UTC))
+    >>> transaction.commit()
+    >>> wait_for(job)
+    >>> job.result
+    <zc.twist.Failure zc.async.interfaces.AbortedError>
+    >>> import sys
+    >>> job.result.printTraceback(sys.stdout) # doctest: +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    Failure: zc.async.interfaces.AbortedError:
+
+.. [#job] The Job class can take arguments and keyword arguments
     for the wrapped callable at call time as well, similar to Python
-    2.5's `partial`.  This will be important when we use the Partial as
-    a callback.  For this use case, though, realize that the partial
+    2.5's `partial`.  This will be important when we use the Job as
+    a callback.  For this use case, though, realize that the job
     will be called with no arguments, so you must supply all necessary
     arguments for the callable on creation time.
 
-.. [#resolve]
+.. [#setLiveAnnotation]  Here's the real rule, which is more complex.
+    *Do not send non-persistent mutables or a persistent.Persistent
+    object without a connection, unless you do not refer to it again in
+    the current job.*
 
-    >>> import time
-    >>> import ZODB.POSException
-    >>> def resolve(p):
-    ...     for i in range(100):
-    ...         t = transaction.begin()
-    ...         ignore = time_flies(5)
-    ...         time.sleep(0)
-    ...         t = transaction.begin()
-    ...         try:
-    ...             if (len(dm.thread) == 0 and
-    ...                 len(dm.workers.values()[0].thread) == 0 and
-    ...                 p.state == zc.async.interfaces.COMPLETED): 
-    ...                 break
-    ...         except ZODB.POSException.ReadConflictError:
-    ...             pass
-    ...     else:
-    ...         print 'Timed out'
+.. [#getLiveAnnotation] Here's the real rule. *To prevent surprising
+    errors, do not request an annotation that might be a persistent
+    object.*
 
-.. [#tear_down]
+.. [#stats_1] The dispatcher has a getStatistics method.  Its output also
+    shows that there is an active task.
 
-    >>> twisted.internet.reactor.callLater = oldCallLater
-    >>> twisted.internet.reactor.callFromThread = oldCallFromThread
-    >>> twisted.internet.reactor.addSystemEventTrigger = (
-    ...     oldAddSystemEventTrigger)
-    >>> datetime.datetime = old_datetime
-    >>> import zc.async.engine
-    >>> engine = zc.async.engine.engines[worker.UUID]
-    >>> while 1: # make sure all the threads are dead before we close down
-    ...     for t in engine._threads:
-    ...         if t.isAlive():
-    ...             break
-    ...     else:
-    ...         break
-    ...
+    >>> import pprint
+    >>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
+    {'failed': 2,
+     'longest active': ('\x00...', 'unnamed'),
+     'longest failed': ('\x00...', 'unnamed'),
+     'longest successful': ('\x00...', 'unnamed'),
+     'shortest active': ('\x00\...', 'unnamed'),
+     'shortest failed': ('\x00\...', 'unnamed'),
+     'shortest successful': ('\x00...', 'unnamed'),
+     'started': 12,
+     'statistics end': datetime.datetime(2006, 8, 10, 15, 56, 47, 211),
+     'statistics start': datetime.datetime(2006, 8, 10, 15, 44, 22, 211),
+     'successful': 9,
+     'unknown': 0}
+
+    We can also see the active job with ``getActiveJobIds``.
+    
+    >>> job_ids = dispatcher.getActiveJobIds()
+    >>> len(job_ids)
+    1
+    >>> info = dispatcher.getJobInfo(*job_ids[0])
+    >>> pprint.pprint(info) # doctest: +ELLIPSIS
+    {'call': "<zc.async.job.Job (oid ..., db 'unnamed') ``zc.async.doctest_test.annotateStatus()``>",
+     'completed': None,
+     'failed': False,
+     'poll id': ...,
+     'quota names': (),
+     'result': None,
+     'started': datetime.datetime(...),
+     'thread': ...}
+    >>> info['thread'] is not None
+    True
+    >>> info['poll id'] is not None
+    True
+
+
+.. [#stats_2] Now the task is done, as the stats reflect.
+
+    >>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
+    {'failed': 2,
+     'longest active': None,
+     'longest failed': ('\x00...', 'unnamed'),
+     'longest successful': ('\x00...', 'unnamed'),
+     'shortest active': None,
+     'shortest failed': ('\x00\...', 'unnamed'),
+     'shortest successful': ('\x00...', 'unnamed'),
+     'started': 10,
+     'statistics end': datetime.datetime(2006, 8, 10, 15, 56, 52, 211),
+     'statistics start': datetime.datetime(2006, 8, 10, 15, 46, 52, 211),
+     'successful': 8,
+     'unknown': 0}
+
+    Although, wait a second--the 'statistics start', the 'started', and the
+    'successful' values have changed!  Why?
+    
+    To keep memory from rolling out of control, the dispatcher by default
+    only keeps 10 to 12.5 minutes' worth of poll information in memory.  For
+    the rest, keep logs and look at them (...and rotate them!).
+
+    The ``getActiveJobIds`` list shows the new task--which is completed, but
+    not as of the last poll, so it's still in the list.
+    
+    >>> job_ids = dispatcher.getActiveJobIds()
+    >>> len(job_ids)
+    1
+    >>> info = dispatcher.getJobInfo(*job_ids[0])
+    >>> pprint.pprint(info) # doctest: +ELLIPSIS
+    {'call': "<zc.async.job.Job (oid ..., db 'unnamed') ``zc.async.doctest_test.annotateStatus()``>",
+     'completed': datetime.datetime(...),
+     'failed': False,
+     'poll id': ...,
+     'quota names': (),
+     'result': '42',
+     'started': datetime.datetime(...),
+     'thread': ...}
+    >>> info['thread'] is not None
+    True
+    >>> info['poll id'] is not None
+    True
+
+.. [#idea_for_collapsing_jobs] For instance, here is one approach.  Imagine
+    you are queueing the job of indexing documents. If a request to index a
+    given document is already pending, the new job could simply walk the
+    queue and remove (``pull``) the similar tasks, perhaps aggregating any
+    necessary data. Since the jobs are serialized by a quota, no other worker
+    should be trying to work on those jobs.
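+
+    A hedged sketch of the idea, assuming the queue supports ``len`` and
+    indexing (``is_similar`` is a hypothetical predicate)::
+
+        for i in reversed(range(len(queue))):
+            if is_similar(queue[i]):
+                queue.pull(i) # aggregate its data first, if needed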
+
+.. [#stop_usage_reactor] 
+
+    >>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
+    {'failed': 2,
+     'longest active': None,
+     'longest failed': ('\x00...', 'unnamed'),
+     'longest successful': ('\x00...', 'unnamed'),
+     'shortest active': None,
+     'shortest failed': ('\x00\...', 'unnamed'),
+     'shortest successful': ('\x00...', 'unnamed'),
+     'started': 22,
+     'statistics end': datetime.datetime(2006, 8, 10, 15, 57, 47, 211),
+     'statistics start': datetime.datetime(2006, 8, 10, 15, 46, 52, 211),
+     'successful': 20,
+     'unknown': 0}
+    >>> reactor.stop()

Copied: zc.async/trunk/src/zc/async/README_2.txt (from rev 85211, zc.async/branches/dev/src/zc/async/README_2.txt)
===================================================================
--- zc.async/trunk/src/zc/async/README_2.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/README_2.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,747 @@
+============================
+Configuration without Zope 3
+============================
+
+This section discusses setting up zc.async without Zope 3. Since Zope 3 is
+ill-defined, we will be more specific: this describes setting up zc.async
+without ZCML, without any zope.app packages, and with as few dependencies as
+possible. A casual way of describing the dependencies is "ZODB, Twisted and
+zope.component," though we directly depend on some smaller packages and
+indirectly on others [#specific_dependencies]_.
+
+You may have one or two kinds of configurations for your software using
+zc.async. The simplest approach is to have all processes able both to put items
+in queues, and to perform them with a dispatcher. You can then use on-the-fly
+ZODB configuration to determine what jobs, if any, each process' dispatcher
+performs. If a dispatcher has no agents in a given queue, as we'll discuss
+below, the dispatcher will not perform any job for that queue.
+
+However, if you want to create some processes that can only put items in a
+queue, and do not have a dispatcher at all, that is easy to do. We'll call this
+a "client" process, and the full configuration a "client/server process". As
+you might expect, the configuration of a client process is a subset of the
+configuration of the client/server process.
+
+We will first describe setting up a client, non-dispatcher process, in which
+you only can put items in a zc.async queue; and then describe setting up a
+dispatcher client/server process that can be used both to request and to
+perform jobs.
+
+Configuring a Client Process
+============================
+
+Generally, zc.async configuration has four basic parts: component
+registrations, ZODB setup, ZODB configuration, and process configuration.  For
+a client process, we'll discuss required component registrations; ZODB
+setup;  minimal ZODB configuration; process configuration; and then circle
+back around for some optional component registrations.
+
+--------------------------------
+Required Component Registrations
+--------------------------------
+
+The required registrations can be installed for you by the
+``zc.async.configure.base`` function. Most other examples in this package,
+such as those in the `Usage`_ section, use this in their
+test setup. 
+
+Again, for a quick start, you might just want to use the helper
+``zc.async.configure.base`` function, and move on to the `Required ZODB Set
+Up`_ section below.
+
+Here, though, we will go over each required registration to briefly explain
+what they are.
+
+You must have three adapter registrations: IConnection to
+ITransactionManager, IPersistent to IConnection, and IPersistent to
+ITransactionManager.
+
+The ``zc.twist`` package provides all of these adapters.  However,
+zope.app.keyreference also provides a version of the ``connection`` adapter
+that is identical or very similar, and that should work fine if you are 
+already using that package in your application.
+
+    >>> import zc.twist
+    >>> import zope.component
+    >>> zope.component.provideAdapter(zc.twist.transactionManager)
+    >>> zope.component.provideAdapter(zc.twist.connection)
+    >>> import ZODB.interfaces
+    >>> zope.component.provideAdapter(
+    ...     zc.twist.transactionManager, adapts=(ZODB.interfaces.IConnection,))
+
+We also need to be able to adapt functions and methods to jobs.  The
+zc.async.job.Job class is the expected implementation.
+
+    >>> import types
+    >>> import zc.async.interfaces
+    >>> import zc.async.job
+    >>> zope.component.provideAdapter(
+    ...     zc.async.job.Job,
+    ...     adapts=(types.FunctionType,),
+    ...     provides=zc.async.interfaces.IJob)
+    >>> zope.component.provideAdapter(
+    ...     zc.async.job.Job,
+    ...     adapts=(types.MethodType,),
+    ...     provides=zc.async.interfaces.IJob)
+    >>> zope.component.provideAdapter( # optional, rarely used
+    ...     zc.async.job.Job,
+    ...     adapts=(zc.twist.METHOD_WRAPPER_TYPE,),
+    ...     provides=zc.async.interfaces.IJob)
+
+The queue looks for the UUID utility to set the ``assignerUUID`` job attribute,
+and may want to use it to optionally filter jobs during ``claim`` in the
+future. Also, the dispatcher will look for a UUID utility if a UUID is not
+specifically provided to its constructor.
+    
+    >>> from zc.async.instanceuuid import UUID
+    >>> zope.component.provideUtility(
+    ...     UUID, zc.async.interfaces.IUUID, '')
+
+The UUID we register here is a UUID of the instance, which is expected
+to uniquely identify the process when in production. It is stored in
+the file specified by the ``ZC_ASYNC_UUID`` environment variable (or in
+``os.path.join(os.getcwd(), 'uuid.txt')`` if this is not specified, for easy
+initial experimentation with the package).
+
+    >>> import uuid
+    >>> import os
+    >>> f = open(os.environ["ZC_ASYNC_UUID"])
+    >>> uuid_hex = f.readline().strip()
+    >>> f.close()
+    >>> uuid = uuid.UUID(uuid_hex)
+    >>> UUID == uuid
+    True
+
+The uuid.txt file is intended to stay in the instance home as a persistent
+identifier.
+
+Again, all of the required registrations above can be accomplished quickly with
+``zc.async.configure.base``.
+
+--------------------
+Required ZODB Set Up
+--------------------
+
+On a basic level, zc.async needs a setup that supports good conflict
+resolution.  Most or all production ZODB storages now have the necessary
+APIs to support MVCC.
+
+Of course, if you want to run multiple processes, you need ZEO. You should also
+then make sure that your ZEO server installation has all the code that includes
+conflict resolution, such as zc.queue, because, as of this writing, conflict
+resolution happens in the ZEO server, not in clients.
+
+A more subtle decision is whether to use multiple databases.  The zc.async
+dispatcher can generate a lot of database churn.  It may be wise to put the
+queue in a separate database from your content database(s).  
+
+The downsides to this option include the fact that you must be careful to
+specify to which database objects belong; and that broken cross-database
+references are not handled gracefully in the ZODB as of this writing.
+
+We will use multiple databases for our example here, because we are trying to
+demonstrate production-quality examples. We will show this with a pure-Python
+approach, rather than the ZConfig approach usually used by Zope. If you know
+ZConfig, that will be a reasonable approach as well; see zope.app.appsetup
+for how Zope uses ZConfig to set up multidatabases.
+
+In our example, we create two file storages. In production, you might likely
+use ZEO; hooking ClientStorage up instead of FileStorage should be
+straightforward.
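+
+A hedged sketch of that substitution, using the two-storage ZEO layout
+(addresses and storage names are illustrative)::
+
+    from ZEO.ClientStorage import ClientStorage
+    storage = ClientStorage(('localhost', 8100), storage='1')
+    async_storage = ClientStorage(('localhost', 8100), storage='2')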
+
+    >>> databases = {}
+    >>> import ZODB.FileStorage
+    >>> storage = ZODB.FileStorage.FileStorage(
+    ...     'main.fs', create=True)
+    
+    >>> async_storage = ZODB.FileStorage.FileStorage(
+    ...     'async.fs', create=True)
+
+    >>> from ZODB.DB import DB 
+    >>> databases[''] = db = DB(storage)
+    >>> databases['async'] = async_db = DB(async_storage)
+    >>> async_db.databases = db.databases = databases
+    >>> db.database_name = ''
+    >>> async_db.database_name = 'async'
+    >>> conn = db.open()
+    >>> root = conn.root()
+
+------------------
+ZODB Configuration
+------------------
+
+A Queue
+-------
+
+All we must have for a client to be able to put jobs in a queue is...a queue.
+
+For a quick start, the ``zc.async.subscribers`` module provides a subscriber to
+a DatabaseOpened event that does the right dance. See
+``multidb_queue_installer`` and ``queue_installer`` in that module, and you can
+see that in use in `Configuration with Zope 3`_. For now, though, we're taking
+things step by step and explaining what's going on.
+
+Dispatchers look for queues in a mapping off the root of the database in 
+a key defined as a constant: zc.async.interfaces.KEY.  This mapping should
+generally be a zc.async.queue.Queues object.
+
+If we were not using a multi-database for our example, we could simply install
+the queues mapping with this line:
+``root[zc.async.interfaces.KEY] = zc.async.queue.Queues()``.  We will need
+something a bit more baroque.  We will add the queues mapping to the 'async'
+database, and then make it available in the main database ('') with the proper
+key.
+
+    >>> conn2 = conn.get_connection('async')
+    >>> import zc.async.queue
+    >>> queues = conn2.root()['mounted_queues'] = zc.async.queue.Queues()
+
+Note that the 'mounted_queues' key in the async database is arbitrary:
+what we care about is the key in the database that the dispatcher will
+see.
+
+Now we add the object explicitly to conn2, so that the ZODB will know the
+"real" database in which the object lives, even though it will be also
+accessible from the main database.
+
+    >>> conn2.add(queues)
+    >>> root[zc.async.interfaces.KEY] = queues
+    >>> import transaction
+    >>> transaction.commit()
+
+Now we need to put a queue in the queues collection.  We can have more than
+one, as discussed below, but we suggest a convention of the primary queue
+being available in a key of '' (empty string).
+
+    >>> queue = queues[''] = zc.async.queue.Queue()
+    >>> transaction.commit()
+
+Quotas
+------
+
+We touched on quotas in the usage section.  Some jobs will need to
+access resources that are shared across processes.  A central data
+structure such as an index in the ZODB is a prime example, but other
+examples might include a network service that only allows a certain
+number of concurrent connections.  These scenarios can be helped by
+quotas.
+
+Quotas are demonstrated in the usage section.  For configuration, you
+should know these characteristics (a consolidated sketch follows the list):
+
+- you cannot add a job with a quota name that is not defined in the
+  queue [#undefined_quota_name]_;
+
+- you cannot add a quota name to a job in a queue if the quota name is not
+  defined in the queue [#no_mutation_to_undefined]_;
+
+- you can create and remove quotas on the queue [#create_remove_quotas]_;
+
+- you can remove quotas if pending jobs have their quota names--the quota name
+  is then ignored [#remove_quotas]_;
+
+- quotas default to a size of 1 [#default_size]_;
+
+- this can be changed at creation or later [#change_size]_; and
+
+- decreasing the size of a quota while the old quota size is filled will
+  not affect the currently running jobs [#decreasing_affects_future]_.
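+
+Here is the consolidated sketch.  The ``size`` argument and the later
+mutation of ``size`` are assumptions based on the footnoted demonstrations::
+
+    queue.quotas.create('indexing')      # size defaults to 1
+    queue.quotas.create('db', size=2)    # ...or specify it at creation
+    queue.quotas['db'].size = 3          # ...or change it later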
+
+Multiple Queues
+---------------
+
+Since we put our queues in a mapping, we can also create multiple
+queues.  This can make some scenarios more convenient and simpler to reason
+about.  For instance, while you might have agents filtering jobs as we
+describe above, it might be simpler to say that you have a queue for one kind
+of job--say, processing a video file or an audio file--and a queue for other
+kinds of jobs.  Then it is easy and obvious to set up simple FIFO agents
+as desired for different dispatchers.  The same kind of logic could be
+accomplished with agents, but it is easier to picture the multiple queues.
+
+Another use case for multiple queues might be for specialized queues, like ones
+that broadcast jobs. You could write a queue subclass that broadcasts copies
+of the jobs it gets to all dispatchers, aggregating results.  This could be
+used to
+send "events" to all processes, or to gather statistics on certain processes,
+and so on.
+
+Generally, any time the application wants to be able to assert a kind of job
+rather than letting the agents decide what to do, having separate queues is
+a reasonable tool.
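+
+Creating an additional queue is then just another entry in the mapping.  For
+example (the 'video' name is illustrative)::
+
+    queues['video'] = zc.async.queue.Queue()
+    transaction.commit()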
+
+---------------------
+Process Configuration
+---------------------
+
+Daemonization
+-------------
+
+You often want to daemonize your software, so that you can restart it if
+there's a problem, keep track of it and monitor it, and so on.  ZDaemon
+(http://pypi.python.org/pypi/zdaemon) and Supervisor (http://supervisord.org/)
+are two fairly simple-to-use ways of doing this for both client and
+client/server processes. If your main application can be packaged as a
+setuptools distribution (egg or source release or even development egg) then
+you can have your main application as a zc.async client and your dispatchers
+running a separate zc.async-only main loop that simply includes your main
+application as a dependency, so the necessary software is around. You may have
+to do a bit more configuration on the client/server side to mimic global
+registries such as zope.component registrations and so on between the client
+and the client/servers, but this shouldn't be too bad.
+
+UUID File Location
+------------------
+
+As discussed above, the instanceuuid module will look for an environmental
+variable ``ZC_ASYNC_UUID`` to find the file name to use, and failing that will
+use ``os.path.join(os.getcwd(), 'uuid.txt')``.  It's worth noting that daemonization
+tools such as ZDaemon and Supervisor (3 or greater) make setting environment
+values for child processes an easy (and repeatable) configuration file setting.
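+
+For instance, a minimal sketch of pointing a process at a specific file (the
+path is illustrative; set the variable before ``zc.async.instanceuuid`` is
+first imported, because the UUID is read when that module loads)::
+
+    import os
+    os.environ['ZC_ASYNC_UUID'] = '/var/instance/etc/uuid.txt'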
+
+-----------------------------------------------------
+Optional Component Registrations for a Client Process
+-----------------------------------------------------
+
+The only optional component registration potentially valuable for client
+instances that only put jobs in the queue is registering an adapter from
+persistent objects to a queue.  The ``zc.async.queue.getDefaultQueue`` adapter
+does this for an adapter to the queue named '' (empty string).  Since that's
+what we have from the `ZODB Configuration`_ above section, we'll register it.
+Writing your own adapter is trivial, as you can see if you look at the
+implementation of this function.
+
+    >>> zope.component.provideAdapter(zc.async.queue.getDefaultQueue)
+    >>> zc.async.interfaces.IQueue(root) is queue
+    True
+
+Configuring a Client/Server Process
+===================================
+
+Configuring a client/server process--something that includes a running
+dispatcher--means doing everything described above, plus a bit more.  You
+need to set up and start a reactor and dispatcher; configure agents as desired
+to get the dispatcher to do some work; and optionally configure logging.
+
+For a quick start, the ``zc.async.subscribers`` module has some conveniences
+to start a threaded reactor and dispatcher, and to install agents.  You might
+want to look at those to get started.  They are also used in the Zope 3
+configuration (README_3).  Meanwhile, this document continues to go
+step-by-step instead, to try and explain the components and configuration.
+
+Even though it seems reasonable to first start a dispatcher and then set up its
+agents, we'll first define a subscriber to create an agent. As we'll see below,
+the dispatcher fires an event when it registers with a queue, and another when
+it activates the queue. These events give you the opportunity to register
+subscribers to add one or more agents to a queue, to tell the dispatcher what
+jobs to perform. zc.async.agent.addMainAgentActivationHandler is a reasonable
+starter: it adds a single agent named 'main' if one does not exist. The agent
+has a simple indiscriminate FIFO policy for the queue. If you want to write
+your own subscriber, look at this, or at the more generic subscriber in the
+``zc.async.subscribers`` module.
+
+Agents are an important part of the ZODB configuration, and so are described
+more in depth below.
+
+    >>> import zc.async.agent
+    >>> zope.component.provideHandler(
+    ...     zc.async.agent.addMainAgentActivationHandler)
+
+This subscriber is registered for the IDispatcherActivated event; another
+approach might use the IDispatcherRegistered event.
+
+-----------------------
+Starting the Dispatcher
+-----------------------
+
+Now we can start the reactor, and start the dispatcher.
+In some applications this may be done with an event subscriber to
+DatabaseOpened, as is done in ``zc.async.subscribers``. Here, we will do it
+inline.
+
+Any object that conforms to the specification of zc.async.interfaces.IReactor
+will be usable by the dispatcher.  For our example, we will use our own instance
+of the Twisted select-based reactor running in a separate thread.  This is
+separate from the Twisted reactor installed in twisted.internet.reactor, and
+so this approach can be used with an application that does not otherwise use
+Twisted (for instance, a Zope application using the "classic" zope publisher).
+
+The testing module also has a reactor on which the `Usage` section relies, if
+you would like to see a minimal contract.
+
+Configuring the basics is fairly simple, as we'll see in a moment.  The
+trickiest part is to handle signals cleanly. It is also optional! The
+dispatcher will eventually figure out that there was not a clean shutdown
+before and take care of it. Here, though, essentially as an optimization, we
+install signal handlers in the main thread using ``reactor._handleSignals``.
+``reactor._handleSignals`` may work in some real-world applications, but if
+your application already needs to handle signals you may need a more careful
+approach. Again, see ``zc.async.subscribers`` for some options you can explore.
+
+    >>> import twisted.internet.selectreactor
+    >>> reactor = twisted.internet.selectreactor.SelectReactor()
+    >>> reactor._handleSignals()
+
+Now we are ready to instantiate our dispatcher.
+
+    >>> dispatcher = zc.async.dispatcher.Dispatcher(db, reactor)
+
+Notice it has the uuid defined in instanceuuid.
+
+    >>> dispatcher.UUID == UUID
+    True
+
+Now we can start the reactor and the dispatcher in a thread.
+
+    >>> import threading
+    >>> def start():
+    ...     dispatcher.activate()
+    ...     reactor.run(installSignalHandlers=0)
+    ...
+    >>> thread = threading.Thread(target=start)
+    >>> thread.setDaemon(True)
+
+    >>> thread.start()
+
+The dispatcher should be starting up now.  Let's wait for it to activate.
+We're using a test convenience, get_poll, defined in the footnotes
+[#get_poll]_.
+
+    >>> poll = get_poll(0)
+
+We're off!  The events have been fired for registering and activating the
+dispatcher.  Therefore, our subscriber to add our agent has fired.
+
+We need to begin our transaction to synchronize our view of the database.
+
+    >>> t = transaction.begin()
+
+We get the collection of dispatcher agents from the queue, using the UUID.
+
+    >>> dispatcher_agents = queue.dispatchers[UUID]
+
+It has one agent--the one placed by our subscriber.
+
+    >>> dispatcher_agents.keys()
+    ['main']
+    >>> agent = dispatcher_agents['main']
+
+Now we have our agent!  But...what is it [#stop_config_reactor]_?
+
+------
+Agents
+------
+
+Agents are the way you control what a dispatcher's worker threads do.  They
+pick the jobs and assign them to their dispatcher when the dispatcher asks.
+
+*If a dispatcher does not have any agents in a given queue, it will not perform
+any tasks for that queue.*
+
+We currently have an agent that simply asks for the next available FIFO job.
+We are using an agent implementation that allows you to specify a callable to
+choose the job.  That callable is now zc.async.agent.chooseFirst.
+
+    >>> agent.chooser is zc.async.agent.chooseFirst
+    True
+
+Here's the entire implementation of that function::
+
+    def chooseFirst(agent):
+        return agent.queue.claim()
+
+What would another agent do?  Well, it might pass a filter function to
+``claim``.  This function takes a job and returns a value evaluated as a
+boolean.  For instance, let's say we always wanted a certain number of
+threads available for working on a particular call; for the purpose of
+example, we'll use ``operator.mul``, though a more real-world example
+might be a network call or a particular call in your application.
+
+    >>> import operator
+    >>> def chooseMul(agent):
+    ...     return agent.queue.claim(lambda job: job.callable is operator.mul)
+    ...
+
+Another variant would prefer operator.mul, but if none is in the queue,
+it will take any job.
+
+    >>> def preferMul(agent):
+    ...     res = agent.queue.claim(lambda job: job.callable is operator.mul)
+    ...     if res is None:
+    ...         res = agent.queue.claim()
+    ...     return res
+    ...
+
+Other approaches might look at the current jobs in the agent, or the agent's
+dispatcher, and decide what jobs to prefer on that basis.  The agent design is
+intended to support many such policies.
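+
+For instance, here is a sketch, under the same assumptions as the examples
+above, of a chooser that looks at the jobs the agent is already working on
+and claims at most one ``operator.mul`` job at a time::
+
+    def chooseAtMostOneMul(agent):
+        for job in agent:
+            if job.callable is operator.mul:
+                # already running a mul job: only take something else
+                return agent.queue.claim(
+                    lambda job: job.callable is not operator.mul)
+        # no mul job in progress; fall back to the preferMul policy
+        return preferMul(agent)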
+
+Let's set up another agent, in addition to the ``chooseFirst`` one, that has
+the ``preferMul`` policy.
+
+    >>> agent2 = dispatcher_agents['mul'] = zc.async.agent.Agent(preferMul)
+
+Another characteristic of agents is that they specify how many jobs they
+should pick at a time.  The dispatcher actually adjusts the size of the
+ZODB connection pool to accommodate its agents' sizes.  The default size is 3.
+
+    >>> agent.size
+    3
+    >>> agent2.size
+    3
+
+We can change that at creation or later.
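+
+For instance, a sketch (the ``'other'`` agent name is purely illustrative)::
+
+    agent3 = dispatcher_agents['other'] = zc.async.agent.Agent(
+        zc.async.agent.chooseFirst, size=1) # size specified at creation
+    agent3.size = 2 # ...and changed later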
+
+Finally, it's worth noting that agents contain the jobs that the dispatcher is
+currently working on, on their behalf; and that they have a ``completed``
+collection of recently completed jobs, beginning with the most recently
+completed job.
+
+----------------------
+Logging and Monitoring
+----------------------
+
+Logs are sent to the ``zc.async.events`` log for big events, like startup and
+shutdown, and errors.  Poll and job logs are sent to ``zc.async.trace``.
+Configure the standard Python logging module as usual to send these logs where
+you need.  Be sure to auto-rotate the trace logs.
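+
+For instance, here is a minimal sketch using only the standard library; the
+file name and rotation policy are merely illustrative::
+
+    import logging
+    import logging.handlers
+
+    trace = logging.getLogger('zc.async.trace')
+    # rotate the trace log automatically, as suggested above
+    handler = logging.handlers.RotatingFileHandler(
+        'async_trace.log', maxBytes=10000000, backupCount=5)
+    trace.addHandler(handler)
+    trace.setLevel(logging.INFO)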
+
+The package supports monitoring using zc.z3monitor, but that pulls in more
+Zope 3 dependencies, so it is not included here.  If you would like to use it,
+see monitor.txt in the package and our next section:
+`Configuration with Zope 3`_.
+
+    >>> reactor.stop()
+
+.. ......... ..
+.. Footnotes ..
+.. ......... ..
+
+.. [#specific_dependencies]  More specifically, as of this writing,
+    these are the minimal egg dependencies (including indirect
+    dependencies):
+
+    - pytz
+        A Python time zone library
+    
+    - rwproperty
+        A small package of descriptor conveniences
+    
+    - uuid
+        The uuid module included in Python 2.5
+    
+    - zc.dict
+        A ZODB-aware dict implementation based on BTrees.
+    
+    - zc.queue
+        A ZODB-aware queue
+    
+    - zc.twist
+        Conveniences for working with Twisted and the ZODB
+    
+    - twisted
+        The Twisted internet library.
+    
+    - ZConfig
+        A general configuration package coming from the Zope project, used by
+        the ZODB and zdaemon.
+    
+    - zdaemon
+        A general daemon tool coming from the Zope project.
+    
+    - ZODB3
+        The Zope Object Database.
+    
+    - zope.bforest
+        Aggregations of multiple BTrees into a single dict-like structure,
+        reasonable for rotating data structures, among other purposes.
+    
+    - zope.component
+        A way to hook together code by contract.
+    
+    - zope.deferredimport
+        A way to defer imports in Python packages, often to prevent circular
+        import problems.
+    
+    - zope.deprecation
+        A small framework for deprecating features.
+    
+    - zope.event
+        An exceedingly small event framework that derives its power from
+        zope.component.
+    
+    - zope.i18nmessageid
+        A way to specify strings to be translated.
+    
+    - zope.interface
+        A way to specify code contracts and other data structures.
+    
+    - zope.proxy
+        A way to proxy other Python objects.
+    
+    - zope.testing
+        Testing extensions and helpers.
+
+    The next section, `Configuration with Zope 3`_, still tries to limit
+    dependencies--we only rely on additional packages zc.z3monitor, simplejson,
+    and zope.app.appsetup ourselves--but as of this writing zope.app.appsetup
+    ends up dragging in a large chunk of zope.app.* packages. Hopefully that
+    will be refactored in Zope itself, and our full Zope 3 configuration can
+    benefit from the reduced indirect dependencies.
+
+.. [#undefined_quota_name]
+
+    >>> import operator
+    >>> import zc.async.job
+    >>> job = zc.async.job.Job(operator.mul, 5, 2)
+    >>> job.quota_names = ['content catalog']
+    >>> job.quota_names
+    ('content catalog',)
+    >>> queue.put(job)
+    Traceback (most recent call last):
+    ...
+    ValueError: ('unknown quota name', 'content catalog')
+    >>> len(queue)
+    0
+
+.. [#no_mutation_to_undefined]
+
+    >>> job.quota_names = ()
+    >>> job is queue.put(job)
+    True
+    >>> job.quota_names = ('content catalog',)
+    Traceback (most recent call last):
+    ...
+    ValueError: ('unknown quota name', 'content catalog')
+    >>> job.quota_names
+    ()
+
+.. [#create_remove_quotas]
+
+    >>> list(queue.quotas)
+    []
+    >>> queue.quotas.create('testing')
+    >>> list(queue.quotas)
+    ['testing']
+    >>> queue.quotas.remove('testing')
+    >>> list(queue.quotas)
+    []
+
+.. [#remove_quotas]
+
+    >>> queue.quotas.create('content catalog')
+    >>> job.quota_names = ('content catalog',)
+    >>> queue.quotas.remove('content catalog')
+    >>> job.quota_names
+    ('content catalog',)
+    >>> job is queue.claim()
+    True
+    >>> len(queue)
+    0
+
+.. [#default_size]
+
+    >>> queue.quotas.create('content catalog')
+    >>> queue.quotas['content catalog'].size
+    1
+
+.. [#change_size]
+
+    >>> queue.quotas['content catalog'].size = 2
+    >>> queue.quotas['content catalog'].size
+    2
+    >>> queue.quotas.create('frobnitz account', size=3)
+    >>> queue.quotas['frobnitz account'].size
+    3
+
+.. [#decreasing_affects_future]
+
+    >>> job1 = zc.async.job.Job(operator.mul, 5, 2)
+    >>> job2 = zc.async.job.Job(operator.mul, 5, 2)
+    >>> job3 = zc.async.job.Job(operator.mul, 5, 2)
+    >>> job1.quota_names = job2.quota_names = job3.quota_names = (
+    ...     'content catalog',)
+    >>> job1 is queue.put(job1)
+    True
+    >>> job2 is queue.put(job2)
+    True
+    >>> job3 is queue.put(job3)
+    True
+    >>> job1 is queue.claim()
+    True
+    >>> job2 is queue.claim()
+    True
+    >>> print queue.claim()
+    None
+    >>> quota = queue.quotas['content catalog']
+    >>> len(quota)
+    2
+    >>> list(quota) == [job1, job2]
+    True
+    >>> quota.filled
+    True
+    >>> quota.size = 1
+    >>> quota.filled
+    True
+    >>> print queue.claim()
+    None
+    >>> job1()
+    10
+    >>> print queue.claim()
+    None
+    >>> len(quota)
+    1
+    >>> list(quota) == [job2]
+    True
+    >>> job2()
+    10
+    >>> job3 is queue.claim()
+    True
+    >>> list(quota) == [job3]
+    True
+    >>> len(quota)
+    1
+    >>> job3()
+    10
+    >>> print queue.claim()
+    None
+    >>> len(queue)
+    0
+    >>> quota.clean()
+    >>> len(quota)
+    0
+    >>> quota.filled
+    False
+
+.. [#get_poll]
+
+    >>> import time
+    >>> def get_poll(count=None):
+    ...     if count is None:
+    ...         count = len(dispatcher.polls)
+    ...     for i in range(30):
+    ...         if len(dispatcher.polls) > count:
+    ...             return dispatcher.polls.first()
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'no poll!'
+    ... 
+
+.. [#stop_config_reactor] We don't want the live dispatcher for our demos,
+    actually.  See dispatcher.txt to see the live dispatcher actually in use.
+
+    >>> reactor.callFromThread(reactor.stop)
+    >>> for i in range(30):
+    ...     if not dispatcher.activated:
+    ...         break
+    ...     time.sleep(0.1)
+    ... else:
+    ...     assert False, 'dispatcher did not deactivate'
+    ...
+
+    Now, we'll restart with an explicit reactor.
+    
+    >>> import zc.async.testing
+    >>> reactor = zc.async.testing.Reactor()
+    >>> dispatcher.reactor = reactor
+    >>> dispatcher.activate()
+    >>> reactor.start()

Copied: zc.async/trunk/src/zc/async/README_3.txt (from rev 85211, zc.async/branches/dev/src/zc/async/README_3.txt)
===================================================================
--- zc.async/trunk/src/zc/async/README_3.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/README_3.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,302 @@
+=========================
+Configuration with Zope 3
+=========================
+
+Our last main section can be the shortest yet, both because we've already
+introduced all of the main concepts, and because we will be leveraging
+conveniences to automate much of the configuration shown in the section
+discussing configuration without Zope 3.
+
+If you want to set up a client alone, without a dispatcher, include
+configure.zcml, make sure you share the database in which the queues will be
+held, and make sure that either the
+zope.app.keyreference.persistent.connectionOfPersistent adapter or the
+zc.twist.connection adapter is registered.
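+
+A minimal client-only ZCML sketch, assuming the zc.twist.connection adapter is
+the one you need::
+
+    <configure xmlns='http://namespaces.zope.org/zope'>
+      <include package="zc.async" file="configure.zcml" />
+      <adapter factory="zc.twist.connection" />
+    </configure>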
+
+For a client/server combination, use zcml that is something like the
+basic_dispatcher_policy.zcml, make sure you have access to the database with
+the queues, configure logging and monitoring as desired, configure the
+``ZC_ASYNC_UUID`` environmental variable in zdaemon.conf if you are in
+production, and start up! Getting started is really pretty easy. You can even
+start a dispatcher-only version by not starting any servers in zcml.
+
+We'll look at this by making a zope.conf-alike and a site.zcml-alike.  We'll
+need a place to put some files, so we'll use a temporary directory.  This, and
+the comments in the files that we set up, are the primary differences between
+our examples and a real setup.
+
+So, without further ado, here is the text of our zope.conf-alike, and of our
+site.zcml-alike [#get_vals]_.  We'll be using two databases for this example,
+as you might want for a site with a fair amount of zc.async usage.
+
+    >>> zope_conf = """
+    ... site-definition %(site_zcml_file)s
+    ...
+    ... <zodb main>
+    ...   <filestorage>
+    ...     create true
+    ...     path %(main_storage_path)s
+    ...   </filestorage>
+    ... </zodb>
+    ... 
+    ... <zodb async>
+    ...   <filestorage>
+    ...     create true
+    ...     path %(async_storage_path)s
+    ...   </filestorage>
+    ... </zodb>
+    ... 
+    ... <product-config zc.z3monitor>
+    ...   port %(monitor_port)s
+    ... </product-config>
+    ... 
+    ... <logger>
+    ...   level debug
+    ...   name zc.async
+    ...   propagate no
+    ... 
+    ...   <logfile>
+    ...     path %(async_event_log)s
+    ...   </logfile>
+    ... </logger>
+    ... 
+    ... <logger>
+    ...   level debug
+    ...   name zc.async.trace
+    ...   propagate no
+    ... 
+    ...   <logfile>
+    ...     path %(async_trace_log)s
+    ...   </logfile>
+    ... </logger>
+    ... 
+    ... <eventlog>
+    ...   <logfile>
+    ...     formatter zope.exceptions.log.Formatter
+    ...     path STDOUT
+    ...   </logfile>
+    ...   <logfile>
+    ...     formatter zope.exceptions.log.Formatter
+    ...     path %(event_log)s
+    ...   </logfile>
+    ... </eventlog>
+    ... """ % {'site_zcml_file': site_zcml_file,
+    ...        'main_storage_path': os.path.join(dir, 'main.fs'),
+    ...        'async_storage_path': os.path.join(dir, 'async.fs'),
+    ...        'monitor_port': monitor_port,
+    ...        'event_log': os.path.join(dir, 'z3.log'),
+    ...        'async_event_log': os.path.join(dir, 'async.log'),
+    ...        'async_trace_log': os.path.join(dir, 'async_trace.log'),}
+    ... 
+
+In a non-trivial production system, you will also probably want to replace
+the two file storages with two <zeoclient> stanzas.
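+
+For example, each <zodb> stanza might then look something like this sketch;
+the address and storage name are illustrative::
+
+    <zodb main>
+      <zeoclient>
+        server localhost:8100
+        storage 1
+      </zeoclient>
+    </zodb>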
+
+Also note that an open monitor port should be behind a firewall, of course.
+
+We'll assume that zdaemon.conf has been set up to put ZC_ASYNC_UUID in the
+proper place too.  It would have looked something like this in the
+zdaemon.conf::
+
+    <environment>
+      ZC_ASYNC_UUID /path/to/uuid.txt
+    </environment>
+
+(Other tools, such as supervisor, also can work, of course; their spellings are
+different and are "left as an exercise to the reader" at the moment.)
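+
+As one possible spelling, here is an untested sketch of a supervisord stanza;
+the program name and paths are illustrative::
+
+    [program:instance]
+    command = /path/to/zopectl fg
+    environment = ZC_ASYNC_UUID="/path/to/uuid.txt"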
+
+We'll do that by hand:
+
+    >>> os.environ['ZC_ASYNC_UUID'] = os.path.join(dir, 'uuid.txt')
+
+Now let's define our site-zcml-alike.
+
+    >>> site_zcml = """
+    ... <configure xmlns='http://namespaces.zope.org/zope'
+    ...            xmlns:meta="http://namespaces.zope.org/meta"
+    ...            >
+    ... <include package="zope.component" file="meta.zcml" />
+    ... <include package="zope.component" />
+    ... <include package="zc.z3monitor" />
+    ... <include package="zc.async" file="multidb_dispatcher_policy.zcml" />
+    ...
+    ... <!-- this is usually handled in Zope applications by the
+    ...      zope.app.keyreference.persistent.connectionOfPersistent adapter -->
+    ... <adapter factory="zc.twist.connection" />
+    ... </configure>
+    ... """
+
+Now we're done.
+
+If you want to change policy, change "multidb_dispatcher_policy.zcml" to
+"dispatcher.zcml" in the example above, and register your own replacements
+for the policy bits found in "multidb_dispatcher_policy.zcml".  You'll see
+that most of those bits come from code in subscribers.py, which can be
+adjusted easily.
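+
+A hypothetical sketch of such a replacement, where ``mypackage.policy`` stands
+in for your own code::
+
+    <include package="zc.async" file="dispatcher.zcml" />
+    <subscriber handler="mypackage.policy.queue_installer" />
+    <subscriber handler="mypackage.policy.dispatcher_installer" />
+    <subscriber handler="mypackage.policy.agent_installer" />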
+
+If we process these files, and wait for a poll, we've got a working
+setup [#process]_.
+
+    >>> import zc.async.dispatcher
+    >>> dispatcher = zc.async.dispatcher.get()
+    >>> import pprint
+    >>> pprint.pprint(get_poll(0))
+    {'': {'main': {'active jobs': [],
+                   'error': None,
+                   'len': 0,
+                   'new jobs': [],
+                   'size': 3}}}
+    >>> bool(dispatcher.activated)
+    True
+
+We can ask for a job to be performed, and get the result.
+
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.interfaces
+    >>> queue = zc.async.interfaces.IQueue(root)
+    >>> import operator
+    >>> import zc.async.job
+    >>> job = queue.put(zc.async.job.Job(operator.mul, 21, 2))
+    >>> import transaction
+    >>> transaction.commit()
+    >>> wait_for_result(job)
+    42
+
+We can connect to the monitor server with telnet.
+
+    >>> import telnetlib
+    >>> tn = telnetlib.Telnet('127.0.0.1', monitor_port)
+    >>> tn.write('async status\n') # immediately disconnects
+    >>> print tn.read_all() # doctest: +ELLIPSIS
+    {
+        "poll interval": {
+            "seconds": ...
+        }, 
+        "status": "RUNNING", 
+        "time since last poll": {
+            "seconds": ...
+        }, 
+        "uptime": {
+            "seconds": ...
+        }, 
+        "uuid": "..."
+    }
+    <BLANKLINE>
+
+Now we'll "shut down" with a CTRL-C, or SIGINT, and clean up.
+
+    >>> import signal
+    >>> if getattr(os, 'getpid', None) is not None: # UNIXEN, not Windows
+    ...     pid = os.getpid()
+    ...     try:
+    ...         os.kill(pid, signal.SIGINT)
+    ...     except KeyboardInterrupt:
+    ...         if dispatcher.activated:
+    ...             assert False, 'dispatcher did not deactivate'
+    ...     else:
+    ...         print "failed to send SIGINT, or something"
+    ... else:
+    ...     dispatcher.reactor.callFromThread(dispatcher.reactor.stop)
+    ...     for i in range(30):
+    ...         if not dispatcher.activated:
+    ...             break
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'dispatcher did not deactivate'
+    ...
+    >>> import transaction
+    >>> t = transaction.begin() # sync
+    >>> import zope.component
+    >>> import zc.async.interfaces
+    >>> uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
+    >>> da = queue.dispatchers[uuid]
+    >>> bool(da.activated)
+    False
+
+    >>> db.close()
+    >>> db.databases['async'].close()
+    >>> import shutil
+    >>> shutil.rmtree(dir)
+
+Hopefully zc.async will be an easy-to-configure, easy-to-use, and useful tool
+for you! Good luck!
+
+.. ......... ..
+.. Footnotes ..
+.. ......... ..
+
+.. [#get_vals]
+
+    >>> import errno, os, random, socket, tempfile
+    >>> dir = tempfile.mkdtemp()
+    >>> site_zcml_file = os.path.join(dir, 'site.zcml')
+
+    >>> s = socket.socket()
+    >>> for i in range(20):
+    ...     monitor_port = random.randint(20000, 49151)
+    ...     try:
+    ...         s.bind(('127.0.0.1', monitor_port))
+    ...     except socket.error, e:
+    ...         if e.args[0] == errno.EADDRINUSE:
+    ...             pass
+    ...         else:
+    ...             raise
+    ...     else:
+    ...         s.close()
+    ...         break
+    ... else:
+    ...     assert False, 'could not find available port'
+    ...     monitor_port = None
+    ...
+
+.. [#process]
+
+    >>> zope_conf_file = os.path.join(dir, 'zope.conf')
+    >>> f = open(zope_conf_file, 'w')
+    >>> f.write(zope_conf)
+    >>> f.close()
+    >>> f = open(site_zcml_file, 'w')
+    >>> f.write(site_zcml)
+    >>> f.close()
+
+    >>> import zdaemon.zdoptions
+    >>> import zope.app.appsetup
+    >>> options = zdaemon.zdoptions.ZDOptions()
+    >>> options.schemadir = os.path.join(
+    ...     os.path.dirname(os.path.abspath(zope.app.appsetup.__file__)),
+    ...     'schema')
+    >>> options.realize(['-C', zope_conf_file])
+    >>> config = options.configroot
+
+    >>> import zope.app.appsetup.product
+    >>> zope.app.appsetup.product.setProductConfigurations(
+    ...     config.product_config)
+    >>> ignore = zope.app.appsetup.config(config.site_definition)
+    >>> import zope.app.appsetup.appsetup
+    >>> db = zope.app.appsetup.appsetup.multi_database(config.databases)[0][0]
+
+    >>> import zope.event
+    >>> import zc.async.interfaces
+    >>> zope.event.notify(zc.async.interfaces.DatabaseOpened(db))
+
+    >>> import time
+    >>> def get_poll(count=None): # just a helper used later, not processing
+    ...     if count is None:
+    ...         count = len(dispatcher.polls)
+    ...     for i in range(30):
+    ...         if len(dispatcher.polls) > count:
+    ...             return dispatcher.polls.first()
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'no poll!'
+    ... 
+
+    >>> def wait_for_result(job): # just a helper used later, not processing
+    ...     for i in range(30):
+    ...         t = transaction.begin()
+    ...         if job.status == zc.async.interfaces.COMPLETED:
+    ...             return job.result
+    ...         time.sleep(0.5)
+    ...     else:
+    ...         assert False, 'job never completed'
+    ...
\ No newline at end of file

Modified: zc.async/trunk/src/zc/async/TODO.txt
===================================================================
--- zc.async/trunk/src/zc/async/TODO.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/TODO.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,30 +1,14 @@
-For beta:
+- Write the z3monitor tests.
+- See if the combined README + README_2 + README_3 makes a comprehensible document
 
-- Refactor Worker API, harden engine API.  Policy decisions should be within
-  workers.  IDataManager.pullNext is a mistake; this is policy that should be
-  pushed down to the worker.  Write doctest for engine.
-- Refactor README as introductory text, or provide alternative simple start.
-  Split setup code into another document (or put as footnote)?
-- Write configure.zcml
-- Write doctest for edge cases.
-- Write stress test.
-- Make it possible to have a zc.twist partial to retry forever.
-- Contemplate thread-only Medusa workers.
-- Clarify that only subscribers.py is Zope 3-specific; the rest is ZODB only.
+For future versions:
 
-For RC:
+- queues should be pluggable like agent with filter
+- show how to broadcast, maybe add conveniences
+- show how to use with collapsing jobs (hint to future self: use external queue
+  to put in work, and have job(s) just pull what they can see from queue)
 
-- Announce RC, wait a bit for any feedback
-- Deploy internally
+For some other package, maybe:
 
-For final:
-
-- Feel good about RC
-
-For future versions:
-
 - TTW Management and logging views, as in zasync (see goals in the "History"
-  section of the README).
-- Write a Zope 3 request/context munger that sets security context and site
-  based on current values.
-- Maybe become friendly to Medusa.
+  section of the README).
\ No newline at end of file

Modified: zc.async/trunk/src/zc/async/__init__.py
===================================================================
--- zc.async/trunk/src/zc/async/__init__.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/__init__.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1 @@
+from zc.async.dispatcher import local

Modified: zc.async/trunk/src/zc/async/adapters.py
===================================================================
--- zc.async/trunk/src/zc/async/adapters.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/adapters.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,121 +1,11 @@
-import types
-import datetime
-import pytz
-import persistent
 import persistent.interfaces
 import zope.interface
 import zope.component
 
 import zc.async.interfaces
-import zc.async.subscribers
-import zc.set
-from zc.async import rwproperty
 
 
 @zope.component.adapter(persistent.interfaces.IPersistent)
-@zope.interface.implementer(zc.async.interfaces.IDataManager)
-def defaultDataManagerAdapter(obj):
-    return obj._p_jar.root()[zc.async.subscribers.NAME]
-
-
-@zope.component.adapter(zc.async.interfaces.IPartial)
-@zope.interface.implementer(zc.async.interfaces.IDataManager)
-def partial_to_datamanager(partial):
-    p = partial.__parent__
-    while (p is not None and
-           not zc.async.interfaces.IDataManager.providedBy(p)):
-        p = getattr(p, '__parent__', None)
-    return p
-
-
-class TransparentDescriptor(object):
-    def __init__(self, src_name, value_name, readonly=False):
-        self.src_name = src_name
-        self.value_name = value_name
-        self.readonly = readonly
-
-    def __get__(self, obj, klass=None):
-        if obj is None:
-            return self
-        src = getattr(obj, self.src_name)
-        return getattr(src, self.value_name)
-
-    def __set__(self, obj, value):
-        if self.readonly:
-            raise AttributeError
-        src = getattr(obj, self.src_name)
-        setattr(src, self.value_name, value)
-
-
-class DataManagerPartialData(persistent.Persistent):
-
-    workerUUID = assignerUUID = thread = None
-    _begin_by = _begin_after = None
-
-    def __init__(self, partial):
-        self.__parent__ = self.partial = partial
-        self.selectedUUIDs = zc.set.Set()
-        self.excludedUUIDs = zc.set.Set()
-
-    @property
-    def begin_after(self):
-        return self._begin_after
-    @rwproperty.setproperty
-    def begin_after(self, value):
-        if self.assignerUUID is not None:
-            raise RuntimeError(
-                'can only change begin_after before partial is assigned')
-        if value is not None:
-            if value.tzinfo is None:
-                raise ValueError('cannot use timezone-naive values')
-            else:
-                value = value.astimezone(pytz.UTC)
-        self._begin_after = value
-
-    @property
-    def begin_by(self):
-        return self._begin_by
-    @rwproperty.setproperty
-    def begin_by(self, value):
-        if self.partial.state != zc.async.interfaces.PENDING:
-            raise RuntimeError(
-                'can only change begin_by value of PENDING partial')
-        if value is not None:
-            if value < datetime.timedelta():
-                raise ValueError('negative values are not allowed')
-        self._begin_by = value
-
-KEY = 'zc.async.datamanagerpartial'
-
-
-class DataManagerPartial(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.IDataManagerPartial)
-    zope.component.adapts(zc.async.interfaces.IPartial)
-
-    def __init__(self, partial):
-        self._data = partial
-        if KEY not in partial.annotations:
-            partial.annotations[KEY] = DataManagerPartialData(partial)
-        self._extra = partial.annotations[KEY]
-
-    for nm in zc.async.interfaces.IPartial.names(True):
-        if nm == '__parent__':
-            readonly = False
-        else:
-            readonly = True
-        locals()[nm] = TransparentDescriptor('_data', nm, readonly)
-    for nm in ('workerUUID', 'assignerUUID', 'thread', 'begin_after',
-               'begin_by'):
-        locals()[nm] = TransparentDescriptor('_extra', nm)
-    for nm in ('selectedUUIDs', 'excludedUUIDs'):
-        locals()[nm] = TransparentDescriptor('_extra', nm, True)
-
-@zope.component.adapter(types.MethodType)
-@zope.interface.implementer(zc.async.interfaces.IDataManagerPartial)
-def method_to_datamanagerpartial(m):
-    return DataManagerPartial(zc.async.partial.Partial(m))
-
-@zope.component.adapter(types.FunctionType)
-@zope.interface.implementer(zc.async.interfaces.IDataManagerPartial)
-def function_to_datamanagerpartial(f):
-    return DataManagerPartial(zc.async.partial.Partial(f))
+@zope.interface.implementer(zc.async.interfaces.IQueue)
+def defaultQueueAdapter(obj):
+    return obj._p_jar.root()[zc.async.interfaces.KEY]['']

Copied: zc.async/trunk/src/zc/async/agent.py (from rev 85211, zc.async/branches/dev/src/zc/async/agent.py)
===================================================================
--- zc.async/trunk/src/zc/async/agent.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/agent.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,71 @@
+import datetime
+
+import persistent
+import zc.queue
+
+import zope.interface
+import zope.component
+
+import zc.async.interfaces
+import zc.async.utils
+
+
+def chooseFirst(agent):
+    return agent.queue.claim()
+
+
+class Agent(zc.async.utils.Base):
+
+    zope.interface.implements(zc.async.interfaces.IAgent)
+
+    def __init__(self, chooser=None, size=3):
+        if chooser is None:
+            chooser = chooseFirst
+        self.chooser = chooser
+        self.size = size
+        self._data = zc.queue.PersistentQueue()
+        self._data.__parent__ = self
+        self.completed = zc.async.utils.Periodic(
+            period=datetime.timedelta(days=7),
+            buckets=7)
+        zope.interface.alsoProvides(
+            self.completed, zc.async.interfaces.ICompletedCollection)
+        self.completed.__parent__ = self
+
+    @property
+    def queue(self):
+        if self.parent is not None:
+            return self.parent.parent
+
+    for nm in ('__len__', '__iter__', '__getitem__', '__nonzero__', 'pull'):
+        locals()[nm] = zc.async.utils.simpleWrapper(nm)
+
+    def index(self, item):
+        for ix, i in enumerate(self):
+            if i is item:
+                return ix
+        raise ValueError("%r not in %s" % (item, self.__class__.__name__))
+
+    def remove(self, item):
+        del self[self.index(item)]
+
+    def __delitem__(self, ix):
+        self._data.pull(ix)
+
+    def claimJob(self):
+        if len(self._data) < self.size:
+            res = self.chooser(self)
+            if res is not None:
+                res.parent = self
+                self._data.put(res)
+        else:
+            res = None
+        return res
+
+    def jobCompleted(self, job):
+        self.remove(job)
+        self.completed.add(job)
+
+@zope.component.adapter(zc.async.interfaces.IDispatcherActivated)
+def addMainAgentActivationHandler(event):
+    da = event.object
+    if 'main' not in da:
+        da['main'] = Agent()

Copied: zc.async/trunk/src/zc/async/agent.txt (from rev 85211, zc.async/branches/dev/src/zc/async/agent.txt)
===================================================================
--- zc.async/trunk/src/zc/async/agent.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/agent.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,140 @@
+Agents choose and keep track of jobs for a dispatcher.  The agent is a
+component in the zc.async design that is intended to be pluggable.
+
+Arguably the most interesting method to control is ``claimJob``.  It is 
+responsible for getting the next job from the queue.
+
+The default implementation in zc.async.agent allows you to pass in a callable
+that, given the agent, claims and returns the desired job.  The default
+callable simply asks for the next job.
+
+Let's take a quick look at how the agent works.  Let's imagine we have a
+queue with a dispatcher with an agent [#setUp]_.
+
+The agent is initially empty.
+
+    >>> len(agent)
+    0
+    >>> bool(agent)
+    False
+    >>> list(agent)
+    []
+
+Dispatchers ask the agent to claim jobs.  Initially there are no jobs to
+claim.
+
+    >>> print agent.claimJob()
+    None
+    >>> list(agent)
+    []
+
+We can add some jobs to claim.
+
+    >>> def mock_work():
+    ...     return 42
+    ...
+    >>> job1 = queue.put(mock_work)
+    >>> job2 = queue.put(mock_work)
+    >>> job3 = queue.put(mock_work)
+    >>> job4 = queue.put(mock_work)
+    >>> job5 = queue.put(mock_work)
+
+It will only claim as many active jobs as its size.
+
+    >>> agent.size
+    3
+    >>> job1 is agent.claimJob()
+    True
+    >>> job2 is agent.claimJob()
+    True
+    >>> job3 is agent.claimJob()
+    True
+    >>> print agent.claimJob()
+    None
+    >>> len(agent)
+    3
+    >>> list(agent) == [job1, job2, job3]
+    True
+    >>> job1.parent is agent
+    True
+    >>> job2.parent is agent
+    True
+    >>> job3.parent is agent
+    True
+
+When a job informs its agent that it is done, the agent moves the job to
+the ``completed`` collection [#test_completed]_.
+
+    >>> len(agent.completed)
+    0
+    >>> job2()
+    42
+    >>> list(agent) == [job1, job3]
+    True
+    >>> len(agent)
+    2
+    >>> len(agent.completed)
+    1
+    >>> list(agent.completed) == [job2]
+    True
+    >>> job2.parent is agent
+    True
+
+The completed collection rotates; by default, old jobs are rotated out after
+about a week.
+
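+This reflects the default in the ``Agent`` constructor::
+
+    self.completed = zc.async.utils.Periodic(
+        period=datetime.timedelta(days=7),
+        buckets=7)
+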
+Now we can claim another job.
+
+    >>> job4 is agent.claimJob()
+    True
+    >>> print agent.claimJob()
+    None
+    >>> list(agent) == [job1, job3, job4]
+    True
+    >>> len(agent)
+    3
+
+This particular agent invites you to provide a function to choose jobs.
+The default one simply chooses the first available job in the queue.
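+
+A sketch of an alternative, using the filter argument to ``claim`` that the
+README describes (``mock_work`` is the function defined above)::
+
+    def chooseMockWork(agent):
+        return agent.queue.claim(lambda job: job.callable is mock_work)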
+
+.. [#setUp] First we'll get a database and the necessary registrations.
+
+    >>> from ZODB.tests.util import DB
+    >>> db = DB()
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+    Now we need a queue.
+
+    >>> import zc.async.queue
+    >>> import zc.async.interfaces
+    >>> container = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
+    >>> queue = container[''] = zc.async.queue.Queue()
+    >>> import transaction
+    >>> transaction.commit()
+
+    Now we need an activated dispatcher agents collection.
+    
+    >>> import zc.async.instanceuuid
+    >>> queue.dispatchers.register(zc.async.instanceuuid.UUID)
+    >>> da = queue.dispatchers[zc.async.instanceuuid.UUID]
+    >>> da.activate()
+
+    And now we need an agent.
+    
+    >>> import zc.async.agent
+    >>> agent = da['main'] = zc.async.agent.Agent()
+    >>> agent.name
+    'main'
+    >>> agent.parent is da
+    True
+
+.. [#test_completed]
+
+    >>> import zope.interface.verify
+    >>> zope.interface.verify.verifyObject(
+    ...     zc.async.interfaces.ICompletedCollection,
+    ...     agent.completed)
+    True

Copied: zc.async/trunk/src/zc/async/basic_dispatcher_policy.zcml (from rev 85211, zc.async/branches/dev/src/zc/async/basic_dispatcher_policy.zcml)
===================================================================
--- zc.async/trunk/src/zc/async/basic_dispatcher_policy.zcml	                        (rev 0)
+++ zc.async/trunk/src/zc/async/basic_dispatcher_policy.zcml	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configure xmlns="http://namespaces.zope.org/zope">
+    <include file="dispatcher.zcml" />
+    <subscriber handler=".subscribers.queue_installer" />
+    <subscriber handler=".subscribers.threaded_dispatcher_installer" />
+    <subscriber handler=".subscribers.agent_installer" />
+    <adapter factory="zc.async.queue.getDefaultQueue" />
+</configure>

Copied: zc.async/trunk/src/zc/async/configure.py (from rev 85211, zc.async/branches/dev/src/zc/async/configure.py)
===================================================================
--- zc.async/trunk/src/zc/async/configure.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/configure.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,46 @@
+import types
+
+import zc.twist
+import zope.component
+import ZODB.interfaces
+
+import zc.async.interfaces
+import zc.async.job
+import zc.async.instanceuuid
+
+# These functions accomplish what configure.zcml does; you don't want both
+# to be in play (the component registry will complain).
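+#
+# A typical use, as in this package's tests, is to call one of these once at
+# startup (a sketch):
+#
+#     import zc.async.configure
+#     zc.async.configure.base()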
+
+def minimal():
+    # use this ``minimal`` function if you have the
+    # zope.app.keyreference.persistent.connectionOfPersistent adapter
+    # installed in your zope.component registry.  Otherwise use ``base``
+    # below.
+
+    # persistent object and connection -> transaction manager
+    zope.component.provideAdapter(zc.twist.transactionManager)
+    zope.component.provideAdapter(zc.twist.transactionManager,
+                                  adapts=(ZODB.interfaces.IConnection,))
+
+    # function and method -> job
+    zope.component.provideAdapter(
+        zc.async.job.Job,
+        adapts=(types.FunctionType,),
+        provides=zc.async.interfaces.IJob)
+    zope.component.provideAdapter(
+        zc.async.job.Job,
+        adapts=(types.MethodType,),
+        provides=zc.async.interfaces.IJob)
+    zope.component.provideAdapter( # optional, rarely used
+        zc.async.job.Job,
+        adapts=(zc.twist.METHOD_WRAPPER_TYPE,),
+        provides=zc.async.interfaces.IJob)
+
+    # UUID for this instance
+    zope.component.provideUtility(
+        zc.async.instanceuuid.UUID, zc.async.interfaces.IUUID)
+
+def base():
+    # see comment in ``minimal``, above
+    minimal()
+    zope.component.provideAdapter(zc.twist.connection)
\ No newline at end of file

Copied: zc.async/trunk/src/zc/async/configure.zcml (from rev 85211, zc.async/branches/dev/src/zc/async/configure.zcml)
===================================================================
--- zc.async/trunk/src/zc/async/configure.zcml	                        (rev 0)
+++ zc.async/trunk/src/zc/async/configure.zcml	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configure xmlns="http://namespaces.zope.org/zope">
+    <utility component=".instanceuuid.UUID" />
+    <adapter factory="zc.twist.transactionManager" />
+    <adapter factory="zc.twist.transactionManager"
+             for="ZODB.interfaces.IConnection" />
+    <!-- this is usually handled in Zope applications by the
+         zope.app.keyreference.persistent.connectionOfPersistent adapter
+    <adapter factory="zc.twist.connection" /> -->
+    <adapter factory="zc.async.job.Job"
+             for="types.FunctionType"
+             provides="zc.async.interfaces.IJob" />
+    <adapter factory="zc.async.job.Job"
+             for="types.MethodType"
+             provides="zc.async.interfaces.IJob" />
+    <adapter factory="zc.async.job.Job"
+             for="zc.twist.METHOD_WRAPPER_TYPE"
+             provides="zc.async.interfaces.IJob" />
+</configure>

Deleted: zc.async/trunk/src/zc/async/datamanager.py
===================================================================
--- zc.async/trunk/src/zc/async/datamanager.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/datamanager.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,403 +0,0 @@
-import datetime
-import bisect
-import pytz
-import persistent
-import ZODB.interfaces
-import BTrees.OOBTree
-import BTrees.Length
-import zope.interface
-import zope.component
-import zope.bforest
-import zc.queue
-
-import zc.async.interfaces
-
-
-def simpleWrapper(name):
-    def wrapper(self, *args, **kwargs):
-        return getattr(self._data, name)(*args, **kwargs)
-    return wrapper
-
-class Workers(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.IWorkers)
-
-    def __init__(self):
-        self._data = BTrees.OOBTree.OOBTree()
-
-    for nm in ('__getitem__', 'get', '__len__', 'keys', 'values', 'items',
-               '__contains__', 'maxKey', 'minKey'):
-        locals()[nm] = simpleWrapper(nm)
-
-    def __iter__(self):
-        return iter(self._data)
-
-    def add(self, value):
-        value = zc.async.interfaces.IWorker(value)
-        if value.UUID is None:
-            raise ValueError("worker must have assigned UUID")
-        self._data[value.UUID] = value
-        value.__parent__ = self
-        return value
-
-    def remove(self, UUID):
-        ob = self._data.pop(UUID)
-        ob.__parent__ = None
-
-def cleanDeadWorker(worker):
-    dm = worker.__parent__.__parent__
-    assert zc.async.interfaces.IDataManager.providedBy(dm)
-    for queue, destination in (
-        (worker.thread, dm.thread), (worker.reactor, dm.reactor)):
-        while queue:
-            p = queue[0]
-            del queue[0]
-            if p.state == zc.async.interfaces.PENDING:
-                destination.put(p.__call__) # will wrap it
-            elif p.state == zc.async.interfaces.ACTIVE:
-                destination.put(p.fail)
-            elif p.state == zc.async.interfaces.CALLBACKS:
-                destination.put(p.resumeCallbacks)
-    
-
-class PartialQueue(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.IPartialQueue)
-
-    def __init__(self, thread):
-        self.thread = thread
-        self._queue = zc.queue.CompositePersistentQueue()
-        self._held = BTrees.OOBTree.OOBTree()
-        self._length = BTrees.Length.Length(0)
-
-    def put(self, item, begin_after=None, begin_by=None):
-        item = zc.async.interfaces.IDataManagerPartial(item)
-        if item.assignerUUID is not None:
-            raise ValueError(
-                'cannot add already-assigned partial')
-        now = datetime.datetime.now(pytz.UTC)
-        if begin_after is not None:
-            item.begin_after = begin_after
-        elif item.begin_after is None:
-            item.begin_after = now
-        if begin_by is not None:
-            item.begin_by = begin_by
-        elif item.begin_by is None:
-            item.begin_by = datetime.timedelta(hours=1)
-        item.assignerUUID = zope.component.getUtility(
-            zc.async.interfaces.IUUID, 'instance')
-        if item._p_jar is None:
-            # we need to do this if the partial will be stored in another
-            # database as well during this transaction.  Also, _held storage
-            # disambiguates against the database_name and the _p_oid.
-            conn = ZODB.interfaces.IConnection(self)
-            conn.add(item)
-        if now == item.begin_after:
-            self._queue.put(item)
-        else:
-            self._held[
-                (item.begin_after,
-                 item._p_jar.db().database_name,
-                 item._p_oid)] = item
-        item.__parent__ = self
-        self._length.change(1)
-        return item
-
-    def _iter(self):
-        queue = self._queue
-        tree = self._held
-        q = enumerate(queue)
-        t = iter(tree.items())
-        q_pop = queue.pull
-        t_pop = tree.pop
-        def get_next(i):
-            try:
-                next = i.next()
-            except StopIteration:
-                active = False
-                next = (None, None)
-            else:
-                active = True
-            return active, next
-        q_active, (q_index, q_next) = get_next(q)
-        t_active, (t_index, t_next) = get_next(t)
-        while q_active and t_active:
-            if t_next.begin_after <= q_next.begin_after:
-                yield t_pop, t_index, t_next
-                t_active, (t_index, t_next) = get_next(t)
-            else:
-                yield q_pop, q_index, q_next
-                q_active, (q_index, q_next) = get_next(q)
-        if t_active:
-            yield t_pop, t_index, t_next
-            for (t_index, t_next) in t:
-                yield t_pop, t_index, t_next
-        elif q_active:
-            yield q_pop, q_index, q_next
-            for (q_index, q_next) in q:
-                yield q_pop, q_index, q_next
-
-    def pull(self, index=0):
-        if index >= self._length():
-            raise IndexError(index)
-        for i, (pop, ix, next) in enumerate(self._iter()):
-            if i == index:
-                tmp = pop(ix)
-                assert tmp is next
-                self._length.change(-1)
-                return next
-        assert False, 'programmer error: the length appears to be incorrect.'
-
-    def __len__(self):
-        return self._length()
-
-    def __iter__(self):
-        return (next for pop, ix, next in self._iter())
-
-    def __nonzero__(self):
-        return bool(self._length())
-
-    def __getitem__(self, index):
-        if index >= len(self):
-            raise IndexError(index)
-        return zc.queue.getitem(self, index)
-
-    def pullNext(self, uuid):
-        now = datetime.datetime.now(pytz.UTC)
-        for ix, p in enumerate(self.iterDue()):
-            if uuid not in p.excludedUUIDs and (
-                not p.selectedUUIDs or
-                uuid in p.selectedUUIDs):
-                return self.pull(ix)
-            elif (p.begin_after + p.begin_by) < now:
-                res = zc.async.interfaces.IDataManagerPartial(
-                        self.pull(ix).fail)
-                res.__parent__ = self
-                res.begin_after = now
-                res.begin_by = datetime.timedelta(hours=1)
-                res.assignerUUID = zope.component.getUtility(
-                    zc.async.interfaces.IUUID, 'instance')
-                return res
-
-    def iterDue(self):
-        now = datetime.datetime.now(pytz.UTC)
-        for partial in self:
-            if partial.begin_after > now:
-                break
-            yield partial
-
-
-class DataManager(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.IDataManager)
-
-    def __init__(self):
-        self.thread = PartialQueue(True)
-        self.thread.__parent__ = self
-        self.reactor = PartialQueue(False)
-        self.reactor.__parent__ = self
-        self.workers = Workers()
-        self.workers.__parent__ = self
-
-    def _getNextActiveSibling(self, uuid):
-        for worker in self.workers.values(min=uuid, excludemin=True):
-            if worker.engineUUID is not None:
-                return worker
-        for worker in self.workers.values(max=uuid, excludemax=True):
-            if worker.engineUUID is not None:
-                return worker
-
-    def checkSibling(self, uuid):
-        now = datetime.datetime.now(pytz.UTC)
-        next = self._getNextActiveSibling(uuid)
-        if next is not None and ((
-            next.last_ping + next.ping_interval + next.ping_death_interval)
-            < now):
-            # `next` is a dead worker.
-            next.engineUUID = None
-            self.thread.put(zc.async.partial.Partial(cleanDeadWorker, next))
-
-
-class SizedSequence(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.ISizedSequence)
-
-    def __init__(self, size):
-        self.size = size
-        self._data = zc.queue.PersistentQueue()
-        self._data.__parent__ = self
-
-    for nm in ('__len__', '__iter__', '__getitem__', '__nonzero__',
-               '_p_resolveConflict'):
-        locals()[nm] = simpleWrapper(nm)
-
-    def add(self, item):
-        if len(self._data) >= self.size:
-            raise zc.async.interfaces.FullError(self)
-        item.__parent__ = self
-        item.workerUUID = self.__parent__.UUID
-        self._data.put(item)
-        return item
-
-    def index(self, item):
-        for ix, i in enumerate(self):
-            if i is item:
-                return ix
-        raise ValueError("%r not in queue" % (item,))
-
-    def remove(self, item):
-        del self[self.index(item)]
-
-    def __delitem__(self, ix):
-        self._data.pull(ix)
-
-
-START = datetime.datetime(2006, 1, 1, tzinfo=pytz.UTC)
-
-def key(item):
-    dt = item.begin_after
-    diff = dt - START
-    return (-diff.days, -diff.seconds, -diff.microseconds,
-            item._p_jar.db().database_name, item._p_oid)
-
-def code(dt):
-    diff = dt - START
-    return (-diff.days, -diff.seconds, -diff.microseconds)
-
-
-class Completed(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.ICompletedCollection)
-    # sorts on begin_after from newest to oldest
-
-    __parent__ = None
-
-    def __init__(self,
-                 rotation_interval=datetime.timedelta(hours=2),
-                 buckets=6):
-        self._data = zope.bforest.OOBForest(count=buckets)
-        self.rotation_interval = rotation_interval
-        self.last_rotation = datetime.datetime.now(pytz.UTC)
-
-    def add(self, item):
-        self._data[key(item)] = item
-        item.__parent__ = self
-
-    def iter(self, start=None, stop=None):
-        sources = []
-        if start is not None:
-            start = code(start)
-        if stop is not None:
-            stop = code(stop)
-        for b in self._data.buckets:
-            i = iter(b.items(start, stop))
-            try:
-                n = i.next()
-            except StopIteration:
-                pass
-            else:
-                sources.append([n, i])
-        sources.sort()
-        length = len(sources)
-        while length > 1:
-            src = sources.pop(0)
-            yield src[0][1]
-            try:
-                src[0] = src[1].next()
-            except StopIteration:
-                length -= 1
-            else:
-                bisect.insort(sources, src) # mildly interesting micro-
-                # optimisation note: this approach shaves off about 1/5 of
-                # an alternative approach that finds the lowest every time
-                # but does not insort.
-        if sources:
-            yield sources[0][0][1]
-            for k, v in sources[0][1]:
-                yield v
-
-    def __iter__(self):
-        return self._data.itervalues() # this takes more memory but the pattern
-        # is typically faster than the custom iter above (for relatively
-        # complete iterations of relatively small sets).  The custom iter
-        # has the advantage of the start and stop code.
-
-    def first(self, start=None):
-        original = start
-        if start is not None:
-            start = code(start)
-            minKey = lambda bkt: bkt.minKey(start)
-        else:
-            minKey = lambda bkt: bkt.minKey()
-        i = iter(self._data.buckets)
-        bucket = i.next()
-        try:
-            key = minKey(bucket)
-        except ValueError:
-            key = None
-        for b in i:
-            try:
-                k = minKey(b)
-            except ValueError:
-                continue
-            if key is None or k < key:
-                bucket, key = b, k
-        if key is None:
-            raise ValueError(original)
-        return bucket[key]
-
-    def last(self, stop=None):
-        original = stop
-        if stop is not None:
-            stop = code(stop)
-            maxKey = lambda bkt: bkt.maxKey(stop)
-        else:
-            maxKey = lambda bkt: bkt.maxKey()
-        i = iter(self._data.buckets)
-        bucket = i.next()
-        try:
-            key = maxKey(bucket)
-        except ValueError:
-            key = None
-        for b in i:
-            try:
-                k = maxKey(b)
-            except ValueError:
-                continue
-            if key is None or k > key:
-                bucket, key = b, k
-        if key is None:
-            raise ValueError(original)
-        return bucket[key]
-
-    def __nonzero__(self):
-        for b in self._data.buckets:
-            try:
-                iter(b).next()
-            except StopIteration:
-                pass
-            else:
-                return True
-        return False
-
-    def __len__(self):
-        return len(self._data)
-
-    def rotate(self):
-        self._data.rotateBucket()
-        self.last_rotation = datetime.datetime.now(pytz.UTC)
-
-
-class Worker(persistent.Persistent):
-    zope.interface.implements(zc.async.interfaces.IWorker)
-
-    def __init__(self, UUID, reactor_size=4, thread_size=1, poll_seconds=5,
-                 ping_interval=datetime.timedelta(minutes=1),
-                 ping_death_interval=datetime.timedelta(seconds=30)):
-        self.reactor = SizedSequence(reactor_size)
-        self.reactor.__parent__ = self
-        self.thread = SizedSequence(thread_size)
-        self.thread.__parent__ = self
-        self.engineUUID = None
-        self.UUID = UUID
-        self.poll_seconds = poll_seconds
-        self.ping_interval = ping_interval
-        self.ping_death_interval = ping_death_interval
-        self.last_ping = datetime.datetime.now(pytz.UTC)
-        self.completed = Completed()
-        self.completed.__parent__ = self

Deleted: zc.async/trunk/src/zc/async/datamanager.txt
===================================================================
--- zc.async/trunk/src/zc/async/datamanager.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/datamanager.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,1222 +0,0 @@
-The datamanager module contains the queues that zc.async clients use to
-deposit jobs, and the queues that workers use to put jobs they are working on.
-
-The main datamanager object simply has a queue for thread jobs, a queue for
-reactor jobs, and a mapping of workers.  It starts out empty [#setUp]_.
-
-    >>> import zc.async.datamanager
-    >>> dm = root['zc.async.datamanager'] = zc.async.datamanager.DataManager()
-    >>> import transaction
-    >>> transaction.commit()
-    >>> len(dm.workers)
-    0
-    >>> len(dm.thread)
-    0
-    >>> len(dm.reactor)
-    0
-
-As shown in the README.txt of this package, the data manager will typically
-be registered as an adapter to persistent objects that provides
-zc.async.interfaces.IDataManager [#verify]_.  
-
-Workers
-=======
-
-When it is installed, workers register themselves.  Workers typically
-get their UUID from the instanceuuid module in this package, but we will
-generate our own here.
-
-    >>> import uuid
-    >>> worker1 = zc.async.datamanager.Worker(uuid.uuid1())
-    >>> res = dm.workers.add(worker1)
-    >>> dm.workers[worker1.UUID] is worker1
-    True
-    >>> res is worker1
-    True
-
-The `workers` object has a mapping read API, with `items`, `values`, `keys`,
-`__len__`, `__getitem__`, and `get` [#check_workers_mapping]_.  You remove
-workers with their UUID [#check_UUID_equivalence]_.
-
-    >>> dm.workers.remove(worker1.UUID)
-    >>> len(dm.workers)
-    0
-
-Let's add the worker back.  Notice that the __parent__ is None when it is out
-of the workers, but set to the workers object when it is inside.  Since the
-workers object also has a __parent__ reference to its parent, the data manager,
-the worker has a link back to the datamanager.
-
-    >>> worker1.__parent__ # None
-    >>> res = dm.workers.add(worker1)
-    >>> worker1.__parent__ is dm.workers
-    True
-    >>> dm.workers.__parent__ is dm
-    True
-
-Each worker has several other attributes.  We'll look at four now:
-`UUID`, which we have already seen; `thread`, a sequence of the thread
-jobs the worker is working on; `reactor`, a sequence of the reactor jobs
-the worker is working on; and `engineUUID`, a uuid of the engine that is
-in charge of running the worker, if any [#verify_worker]_.
-
-The two sequences are unusual in that they are sized: if len(sequence)
-== size, trying to put another item in the sequence raises
-zc.async.interfaces.FullError.  By default, workers have a reactor size
-of 4, and a thread size of 1.
-
-    >>> worker1.thread.size
-    1
-    >>> worker1.reactor.size
-    4
-    >>> def multiply(*args):
-    ...     res = 1
-    ...     for a in args:
-    ...         res *= a
-    ...     return res
-    ...
-    >>> import zc.async.partial
-    >>> p1 = zc.async.partial.Partial(multiply, 2, 3)
-    >>> res = worker1.thread.add(p1)
-    >>> len(worker1.thread)
-    1
-    >>> p2 = zc.async.partial.Partial(multiply, 5, 6)
-    >>> worker1.thread.add(p2)
-    ... # doctest: +ELLIPSIS
-    Traceback (most recent call last):
-    ...
-    FullError: <zc.async.datamanager.SizedSequence object at ...>
-
-You can change the queue size.
-
-    >>> worker1.thread.size = 2
-    >>> res = worker1.thread.add(p2)
-    >>> len(worker1.thread)
-    2
-
-Decreasing it beyond the current len is acceptable, and will only affect
-how many partials must be removed before new ones may be added.
-
-    >>> worker1.thread.size = 1
-    >>> len(worker1.thread)
-    2
-
-You can also set it during instantiation of a worker: `reactor_size` and
-`thread_size` are optional arguments.
-
-    >>> worker2 = zc.async.datamanager.Worker(uuid.uuid1(), 2, 1)
-    >>> worker2.reactor.size
-    2
-    >>> worker2.thread.size
-    1
-
-We'll add the second worker to the data manager.
-
-    >>> res = dm.workers.add(worker2)
-    >>> len(dm.workers)
-    2
-
-Engines claim workers by putting their UUID on them.  Initially a worker has
-no engineUUID.  We'll assign two (arbitrary) UUIDs.
-
-    >>> worker1.engineUUID
-    >>> worker1.engineUUID = uuid.uuid4()
-    >>> worker2.engineUUID = uuid.uuid4()
-
-This indicates that both workers are "open for business".  A worker without an
-engine is a dead husk.
-
-We'll look at partials in workers more a little later
-[#remove_partials]_.  Next we're going to look at partials in the
-data manager queues.
-
-Partials
-========
-
-Once a Zope has started, it will typically have at least one worker installed,
-with one virtual loop per worker checking the data manager for new jobs (see
-README.txt for integration examples).  Now client code can start requesting
-that partials be done.
-
-Basic Story
------------
-
-Simplest use is to get the data manager and add a callable.  As
-mentioned above, and demonstrated in README.txt, the typical way to get
-the data manager is to adapt a persistent context to IDataManager. We'll
-assume we already have the data manager, and that a utility providing
-zc.async.interfaces.IUUID named 'instance' is available
-[#setUp_UUID_utility]_.
-
-    >>> def send_message():
-    ...     print "imagine this sent a message to another machine"
-    ...
-    >>> p = dm.thread.put(send_message)
-
-Now p is a partial wrapping the send_message call.  It is specifically a
-data manager partial [#basic_data_manager_partial_checks]_.
-
-    >>> p.callable is send_message
-    True
-    >>> zc.async.interfaces.IDataManagerPartial.providedBy(p)
-    True
-
-The IDataManagerPartial interface extends IPartial and describes the
-interface needed for a partial added to a data manager.  Here are the
-attributes on the interface.
-
-- Set automatically:
-
-  * assignerUUID (the UUID of the software instance that put the partial in
-    the queue)
-
-  * workerUUID (the UUID of the worker who claimed the partial)
-
-  * thread (None or bool: whether the partial was assigned to a thread (True)
-    or reactor (False) queue)
-
-- Potentially set by user, not honored for callbacks:
-
-  * selectedUUIDs (the UUIDs of workers that should work on the partial, as
-    selected by user)
-
-  * excludedUUIDs (the UUIDs of workers that should not work on the partial,
-    as selected by user)
-
-  * begin_after (a datetime.datetime with pytz.UTC timezone that specifies a
-    date and time to wait till running the partial; defaults to creation
-    time)
-
-  * begin_by (a datetime.timedelta of a duration after begin_after after
-    which workers should call `fail` on the partial; defaults to one hour)
-
-These are described in some more detail on the IDataManagerPartial
-interface.
-
-The thread queue contains the partial.
-
-    >>> len(dm.thread)
-    1
-    >>> list(dm.thread) == [p]
-    True
-
-If you ask the data manager for all due jobs, it also includes the partial.
-
-    >>> list(dm.thread.iterDue()) == [p]
-    True
-
-The partial knows its __parent__ and can be used to obtain its data manager.
-
-    >>> zc.async.interfaces.IDataManager(p) is dm
-    True
-    >>> p.__parent__ is dm.thread
-    True
-
-The easiest way for a worker to get a task is to call pullNext, passing its
-UUID.  This returns the next available task that does not exclude the
-worker (and that includes it), removing it from the queue.  If nothing is
-available, it returns None.
-
-    >>> res = dm.thread.pullNext(worker1.UUID)
-    >>> res is p
-    True
-    >>> len(dm.thread)
-    0
-
-Once a partial has been put in a data manager, it is "claimed": trying to
-put it in another one (or back in the same one) will raise an error.
-
-    >>> dm.thread.put(p)
-    Traceback (most recent call last):
-    ...
-    ValueError: cannot add already-assigned partial
-
-If we remove the assignerUUID, we can put it back in.
-
-    >>> p.assignerUUID = None
-    >>> res = dm.thread.put(p)
-    >>> res is p
-    True
-    >>> len(dm.thread)
-    1
-    >>> transaction.commit()
-
-In normal behavior, after client code has put the task in the thread queue,
-an engine (associated with a persistent worker in a one-to-one relationship,
-in which the worker is the persistent store for the transient, per-process
-engine) will claim and perform it like this (we'll do this from the
-perspective of worker 1).
-
-    >>> trans = transaction.begin()
-    >>> import Queue
-    >>> thread_queue = Queue.Queue(0)
-    >>> claimed = dm.workers[worker1.UUID].thread
-    >>> ct = 0
-    >>> while 1:
-    ...     if len(claimed) < claimed.size:
-    ...         next = dm.thread.pullNext(worker1.UUID)
-    ...         if next is not None:
-    ...             claimed.add(next)
-    ...             database_name = next._p_jar.db().database_name
-    ...             identifier = next._p_oid
-    ...             try:
-    ...                 transaction.commit()
-    ...             except ZODB.POSException.TransactionError:
-    ...                 transaction.abort()
-    ...                 ct += 1
-    ...                 if ct < 5: # in twisted, this would probably callLater
-    ...                     continue
-    ...             else:
-    ...                 thread_queue.put((database_name, identifier))
-    ...                 ct = 0
-    ...                 continue # in twisted, this would probably callLater
-    ...     break
-    ... # doctest: +ELLIPSIS
-    ...
-    <zc.async.adapters.DataManagerPartial object at ...>
-
-Now worker 1 has claimed it.
-
-    >>> len(worker1.thread)
-    1
-
-A thread in that worker, given the database name and _p_oid of the partial
-it should perform, will then do something like this.
-
-    >>> import thread
-    >>> database_name, identifier = thread_queue.get(False)
-    >>> claimed = dm.workers[worker1.UUID].thread # this would actually open a
-    ... # connection and get the worker thread queue object by id
-    >>> for p in claimed:
-    ...     if (p._p_oid == identifier and
-    ...         p._p_jar.db().database_name == database_name):
-    ...         p.thread = thread.get_ident()
-    ...         transaction.commit()
-    ...         try:
-    ...             p()
-    ...         except ZODB.POSException.TransactionError:
-    ...             transaction.abort()
-    ...             p.fail()
-    ...         while 1:
-    ...             try:
-    ...                 claimed.remove(p)
-    ...                 claimed.__parent__.completed.add(p)
-    ...                 transaction.commit()
-    ...             except ZODB.POSException.TransactionError:
-    ...                 transaction.abort() # retry forever!
-    ...             else:
-    ...                 break
-    ...         break
-    ...
-    imagine this sent a message to another machine
-
-And look, there's our message: the partial was called.
-
-The worker's thread list is empty, and the partial has a note of what thread
-ran it.
-
-    >>> len(worker1.thread)
-    0
-    >>> p.thread == thread.get_ident()
-    True
-
-Notice also that the `completed` container now contains the partial.
-
-    >>> len(worker1.completed)
-    1
-    >>> list(worker1.completed) == [p]
-    True
-
-The API of the completed container is still in flux [#test_completed]_.
-
-For Reactors
-------------
-
-If you are into Twisted programming, use the reactor queue.  The story is
-very similar, so we'll go a bit quicker.
-
-    >>> import twisted.internet.defer
-    >>> import twisted.internet.reactor
-    >>> def twistedPartDeux(d):
-    ...     d.callback(42)
-    ...
-    >>> def doSomethingInTwisted():
-    ...     d = twisted.internet.defer.Deferred()
-    ...     twisted.internet.reactor.callLater(0, twistedPartDeux, d)
-    ...     return d
-    ...
-    >>> p = dm.reactor.put(doSomethingInTwisted)
-    >>> def arbitraryThingThatNeedsAConnection(folder, result):
-    ...     folder['result'] = result
-    ...
-    >>> p_callback = p.addCallbacks(zc.async.partial.Partial(
-    ...     arbitraryThingThatNeedsAConnection, root))
-    >>> transaction.commit()
-
-The engine might do something like this [#set_up_reactor]_.
-
-    >>> import zc.twist
-    >>> def remove(container, partial, result):
-    ...     container.remove(partial)
-    ...
-    >>> def perform(p):
-    ...     res = p()
-    ...     p.addCallback(zc.async.partial.Partial(
-    ...         remove, p.__parent__, p))
-    ...     transaction.commit()
-    ...
-    >>> trans = transaction.begin()
-    >>> claimed = dm.workers[worker2.UUID].reactor
-    >>> ct = 0
-    >>> while 1:
-    ...     if len(claimed) < claimed.size:
-    ...         next = dm.reactor.pullNext(worker2.UUID)
-    ...         if next is not None:
-    ...             claimed.add(next)
-    ...             partial = zc.twist.Partial(perform, next)
-    ...             try:
-    ...                 transaction.commit()
-    ...             except ZODB.POSException.TransactionError:
-    ...                 transaction.abort()
-    ...                 ct += 1
-    ...                 if ct < 5: # this would probably callLater really
-    ...                     continue
-    ...             else:
-    ...                 twisted.internet.reactor.callLater(0, partial)
-    ...                 ct = 0
-    ...                 continue # this would probably callLater really
-    ...     break
-    ... # doctest: +ELLIPSIS
-    ...
-    <zc.async.adapters.DataManagerPartial object at ...>
-
-Then the reactor would churn, and eventually we'd get our result.  The
-execution should be something like this (where `time_passes` represents
-one tick of the Twisted reactor that you would normally not have to call
-explicitly--this is just for demonstration purposes).
-
-    >>> time_passes() # perform and doSomethingInTwisted
-    True
-    >>> trans = transaction.begin()
-    >>> p.result # None
-    >>> len(worker2.reactor)
-    1
-    >>> time_passes() # twistedPartDeux and arbitraryThingThatNeedsAConnection
-    True
-    >>> trans = transaction.begin()
-    >>> p.result
-    42
-    >>> root['result']
-    42
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-    >>> len(worker2.reactor)
-    0
-
-[#tear_down_reactor]_
-
-Held Calls
-----------
-
-Both of the examples so far request that jobs be done as soon as possible.
-It's also possible to request that jobs be done later.  Let's assume we
-can control the current time generated by datetime.datetime.now with a
-`set_now` callable [#set_up_datetime]_.  A partial added without any special
-calls gets a `begin_after` attribute of now.
-
-    >>> import datetime
-    >>> import pytz
-    >>> datetime.datetime.now(pytz.UTC) 
-    datetime.datetime(2006, 8, 10, 15, 44, 22, 211, tzinfo=<UTC>)
-    >>> res1 = dm.thread.put(
-    ...     zc.async.partial.Partial(multiply, 3, 6))
-    ...
-    >>> res1.begin_after
-    datetime.datetime(2006, 8, 10, 15, 44, 22, 211, tzinfo=<UTC>)
-
-This means that it's immediately ready to be performed.  `iterDue` shows this.
-
-    >>> list(dm.thread.iterDue()) == [res1]
-    True
-
-You can also specify a begin_after date when you make the call.  Then it
-isn't due immediately.
-
-    >>> res2 = dm.thread.put(
-    ...     zc.async.partial.Partial(multiply, 4, 6),
-    ...     datetime.datetime(2006, 8, 10, 16, tzinfo=pytz.UTC))
-    ...
-    >>> len(dm.thread)
-    2
-    >>> res2.begin_after
-    datetime.datetime(2006, 8, 10, 16, 0, tzinfo=<UTC>)
-    >>> list(dm.thread.iterDue()) == [res1]
-    True
-
-When the time passes, it is available.  Partials are ordered by their
-begin_after dates.
-
-    >>> set_now(datetime.datetime(2006, 8, 10, 16, 0, 0, 1, tzinfo=pytz.UTC))
-    >>> list(dm.thread.iterDue()) == [res1, res2]
-    True
-
-Pre-dating (before now) makes the item come first (or in order with other
-pre-dated items).
-
-    >>> res3 = dm.thread.put(
-    ...     zc.async.partial.Partial(multiply, 5, 6),
-    ...     begin_after=datetime.datetime(2006, 8, 10, 15, 35, tzinfo=pytz.UTC))
-    ...
-    >>> res3.begin_after
-    datetime.datetime(2006, 8, 10, 15, 35, tzinfo=<UTC>)
-    >>> list(dm.thread) == [res3, res1, res2]
-    True
-    >>> list(dm.thread.iterDue()) == [res3, res1, res2]
-    True
-
-Other timezones are normalized to UTC.
-
-    >>> res4 = dm.thread.put(
-    ...     zc.async.partial.Partial(multiply, 6, 6),
-    ...     pytz.timezone('EST').localize(
-    ...         datetime.datetime(2006, 8, 10, 11, 30)))
-    ...
-    >>> res4.begin_after
-    datetime.datetime(2006, 8, 10, 16, 30, tzinfo=<UTC>)
-    >>> list(dm.thread.iterDue()) == [res3, res1, res2]
-    True
-
-Timezone-naive datetimes are not allowed.
-
-    >>> dm.thread.put(send_message, datetime.datetime(2006, 8, 10, 16, 15))
-    Traceback (most recent call last):
-    ...
-    ValueError: cannot use timezone-naive values
-
-Iteration, again, is based on begin_after, not the order added.
-
-    >>> res5 = dm.thread.put(
-    ...     zc.async.partial.Partial(multiply, 7, 6),
-    ...     datetime.datetime(2006, 8, 10, 16, 15, tzinfo=pytz.UTC))
-    ...
-    >>> list(dm.thread) == [res3, res1, res2, res5, res4]
-    True
-    >>> list(dm.thread.iterDue()) == [res3, res1, res2]
-    True
-
-pullNext only returns items that are due.
-
-    >>> dm.thread.pullNext(worker1.UUID) == res3
-    True
-    >>> dm.thread.pullNext(worker1.UUID) == res1
-    True
-    >>> dm.thread.pullNext(worker1.UUID) == res2
-    True
-    >>> dm.thread.pullNext(worker1.UUID) # None
-
-Once items become due, their begin_after order also determines the order in
-which pullNext returns them.
-
-    >>> set_now(datetime.datetime(2006, 8, 10, 16, 31, tzinfo=pytz.UTC))
-    >>> dm.thread.pullNext(worker1.UUID) == res5
-    True
-    >>> dm.thread.pullNext(worker1.UUID) == res4
-    True
-    >>> dm.thread.pullNext(worker1.UUID) # None
-
-Selecting and Excluding Workers
--------------------------------
-
-Some use cases want to limit the workers that can perform a given partial,
-either by explicitly selecting or excluding certain workers.  Here are some of
-those use cases:
-
-- You may want to divide up your workers by task: certain long-running
-  tasks should only tie up one set of workers, so that short tasks, from
-  which users expect more responsiveness, can use any available worker,
-  including some reserved for them.
-
-- You may only have the system resources necessary to perform a given task on
-  a certain set of your workers.
-
-- You may want to broadcast a message to all workers, so you need to generate
-  a task specifically for each.  This would be an interesting way to build
-  a simple but potentially powerful WSGI reverse proxy that supported
-  invalidations, for instance.
-
-There are probably more, and even these have interesting variants.  For
-instance, for the second, what if you don't know which workers are
-appropriate and need to test to find out?  You could write a partial
-that contains a partial.  The outer partial's job is to find a worker
-with an environment appropriate for the inner one.  When it runs, if the
-worker's environment is appropriate, it performs the inner partial.  If
-it is not, it creates a new outer partial wrapping its inner partial,
-specifies the current worker UUID as excluded, and schedules it to be
-called.  If the next worker is also inappropriate, it creates a third
-outer wrapper that excludes both of the failed workers' UUIDs...and so
-on.
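-
-Under the assumption that you supply an ``environment_is_appropriate``
-test (a hypothetical helper, not part of zc.async), such an outer partial
-might be sketched like this::
-
-    def try_here_or_elsewhere(inner, excluded=()):
-        if environment_is_appropriate():
-            return inner()
-        # this worker can't do the job: requeue a new outer partial that
-        # excludes every worker that has already refused it.
-        me = zope.component.getUtility(
-            zc.async.interfaces.IUUID, 'instance')
-        excluded = tuple(excluded) + (me,)
-        dm = zc.async.interfaces.IDataManager(inner)
-        p = dm.thread.put(zc.async.partial.Partial(
-            try_here_or_elsewhere, inner, excluded))
-        p.excludedUUIDs.update(excluded)
-        return p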
-
-To do this, you use worker UUIDs in a partial's selectedUUIDs and
-excludedUUIDs sets.  An empty selectedUUIDs set is interpreted as a
-catch-all.  For a worker to be able to perform a partial, it must not
-be in the excludedUUIDs and either selectedUUIDs is empty or it is within
-selectedUUIDs.
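-
-Schematically, the check a queue performs when deciding whether a worker
-may take a partial looks something like this (a minimal sketch, not the
-actual implementation; ``partial`` and ``worker_uuid`` are illustrative
-names)::
-
-    def eligible(partial, worker_uuid):
-        # excluded workers never qualify
-        if worker_uuid in partial.excludedUUIDs:
-            return False
-        # an empty selectedUUIDs set is a catch-all
-        selected = set(partial.selectedUUIDs)
-        return not selected or worker_uuid in selected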
-
-Let's look at some examples.  We'll assume the five partials we looked at
-above are all back in the dm.threads [#reinstate]_.  We've already looked
-at partials with empty sets for excludedUUIDs and selectedUUIDs: every
-partial we've shown up to now has fit that description.
-
-(order is [res3, res1, res2, res5, res4])
-
-    >>> res3.selectedUUIDs.add(uuid.uuid1()) # nobody here
-    >>> res1.selectedUUIDs.add(worker1.UUID)
-    >>> res2.selectedUUIDs.update((worker1.UUID, worker2.UUID))
-    >>> res2.excludedUUIDs.add(worker2.UUID)
-    >>> res5.excludedUUIDs.add(worker2.UUID)
-    >>> res4.excludedUUIDs.update((worker2.UUID, worker1.UUID))
-
-Now poor worker2 can't get any work.
-
-    >>> dm.thread.pullNext(worker2.UUID) # None
-
-worker1 can get three of them: res1, res2, and res5.
-
-    >>> dm.thread.pullNext(worker1.UUID) is res1
-    True
-    >>> dm.thread.pullNext(worker1.UUID) is res2
-    True
-    >>> dm.thread.pullNext(worker1.UUID) is res5
-    True
-    >>> dm.thread.pullNext(worker1.UUID) # None
-
-Now we have two jobs that can never be claimed (as long as we have only
-these two workers and the selected/excluded values are not changed).
-What happens to them?
-
-Never-Claimed Calls
--------------------
-
-Sometimes, due to impossible worker selections or exclusions, or simply
-workers that are too busy, we need to give up and say that a given partial
-will not be run, and should fail.  Partials have a begin_by attribute which
-controls approximately when this should happen.  The begin_by value should
-be a non-negative datetime.timedelta, which is added to the begin_after value
-to determine when the partial should fail.  If you put a partial in a
-data manager with no begin_by value set, the data manager sets it to one hour.
-
-    >>> res3.begin_by
-    datetime.timedelta(0, 3600)
-
-This can be changed.
-
-    >>> res3.begin_by = datetime.timedelta(hours=2)
-
-So how are they cancelled?  `pullNext` will wrap any expired partial with
-another partial that calls the inner `fail` method.  This will be handed to
-any worker.
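-
-Schematically (illustrative, not the actual implementation)::
-
-    if now >= partial.begin_after + partial.begin_by:
-        # expired: hand any worker a wrapper that fails the partial
-        return zc.async.partial.Partial(partial.fail)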
-
-    >>> res3.begin_after + res3.begin_by
-    datetime.datetime(2006, 8, 10, 17, 35, tzinfo=<UTC>)
-    >>> res4.begin_after + res4.begin_by
-    datetime.datetime(2006, 8, 10, 17, 30, tzinfo=<UTC>)
-    >>> set_now(datetime.datetime(2006, 8, 10, 17, 32, tzinfo=pytz.UTC))
-    >>> p = dm.thread.pullNext(worker1.UUID)
-    >>> p()
-    >>> res4.state == zc.async.interfaces.COMPLETED
-    True
-    >>> print res4.result.getTraceback()
-    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.AbortedError:
-    >>> dm.thread.pullNext(worker1.UUID) # None
-
-    >>> set_now(datetime.datetime(2006, 8, 10, 17, 37, tzinfo=pytz.UTC))
-    >>> p = dm.thread.pullNext(worker1.UUID)
-    >>> p()
-    >>> res3.state == zc.async.interfaces.COMPLETED
-    True
-    >>> print res3.result.getTraceback()
-    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.AbortedError:
-    >>> dm.thread.pullNext(worker1.UUID) # None
-    >>> len(dm.thread)
-    0
-
-Dead Workers
-------------
-
-What happens when an engine, driving a worker, dies?  If it is the only
-engine/worker, that's the end: when the worker restarts it should clean
-out its old worker object and then proceed.  But what if there is more
-than one simultaneous worker?  How do we know to clean out the dead
-worker's jobs?
-
-In addition to the jobs listed above, each worker's virtual main loop has
-an additional task: to be its sibling's keeper.  Each must update a ping
-date on its worker object at a given maximum interval, and check the
-next sibling.  To support this story, the worker objects have a few more
-attributes we haven't talked about: `poll_seconds`, `ping_interval`,
-`ping_death_interval` and `last_ping`.
-
-    >>> worker1.poll_seconds
-    5
-    >>> worker1.ping_interval
-    datetime.timedelta(0, 60)
-    >>> worker1.ping_death_interval
-    datetime.timedelta(0, 30)
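-
-In other words, a sibling considers a worker dead once the current time is
-past ``last_ping + ping_interval + ping_death_interval``.  A minimal
-sketch of that test (illustrative only, not the actual implementation)::
-
-    def looks_dead(worker, now):
-        deadline = (worker.last_ping + worker.ping_interval +
-                    worker.ping_death_interval)
-        return now > deadline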
-
-`pullNext` on a thread queue will hand a partial that cleans out a dead
-worker to the worker with the next highest UUID (circling around to the
-lowest UUID when the dead worker's UUID is the highest).  So let's set a
-ping on worker1.
-
-    >>> worker1.last_ping = datetime.datetime.now(pytz.UTC)
-    >>> worker1.last_ping
-    datetime.datetime(2006, 8, 10, 17, 37, tzinfo=<UTC>)
-
-Let's put res1, res2, res3, res4, and res5 in worker1.
-
-    >>> len(worker1.thread)
-    0
-    >>> worker1.thread.size = 3
-    >>> res1.excludedUUIDs.clear()
-    >>> res1.selectedUUIDs.clear()
-    >>> r = worker1.thread.add(res1) # PENDING
-    >>> res2._state = zc.async.interfaces.ACTIVE
-    >>> r = worker1.reactor.add(res2)
-    >>> r = worker1.thread.add(res3) # COMPLETED
-    >>> r = worker1.reactor.add(res4) # COMPLETED
-    >>> res5._state = zc.async.interfaces.CALLBACKS
-    >>> res5._result = res5.callable(*res5.args, **dict(res5.kwargs))
-    >>> r = worker1.thread.add(res5)
-
-While we are still within our acceptable time period, the `checkSibling`
-method will not do anything.
-
-    >>> len(dm.workers)
-    2
-    >>> len(dm.thread)
-    0
-    >>> set_now(worker1.last_ping + worker1.ping_interval)
-    >>> dm.checkSibling(worker2.UUID)
-    >>> len(dm.workers)
-    2
-    >>> len(dm.thread)
-    0
-
-We need to move `now` to after last_ping + ping_interval +
-ping_death_interval.  Then, when worker2 calls checkSibling on the data
-manager, worker1 will have its engineUUID set to None, and a partial will
-be added to clean out the partials in worker1.
-
-    >>> set_now(worker1.last_ping + worker1.ping_interval +
-    ...         worker1.ping_death_interval + datetime.timedelta(seconds=1))
-    >>> worker1.engineUUID is not None
-    True
-    >>> worker2.engineUUID is not None
-    True
-    >>> dm.checkSibling(worker2.UUID)
-    >>> len(dm.workers)
-    2
-    >>> worker1.engineUUID is not None
-    False
-    >>> worker2.engineUUID is not None
-    True
-    >>> len(dm.thread)
-    1
-
-So worker2 can get the job and perform it.
-
-    >>> res = dm.thread.pullNext(worker2.UUID)
-    >>> partial = worker2.thread.add(res)
-    >>> len(worker1.thread)
-    3
-    >>> len(worker1.reactor)
-    2
-    >>> res()
-    >>> len(worker1.thread)
-    0
-    >>> len(worker1.reactor)
-    0
-    >>> r = dm.thread.pullNext(worker2.UUID)()
-    >>> r = dm.thread.pullNext(worker2.UUID)()
-    >>> r = dm.reactor.pullNext(worker2.UUID)()
-    >>> dm.thread.pullNext(worker2.UUID) # None
-    >>> dm.reactor.pullNext(worker2.UUID) # None
-    
-    >>> res1.state == zc.async.interfaces.COMPLETED
-    True
-    >>> res2.state == zc.async.interfaces.COMPLETED
-    True
-    >>> res3.state == zc.async.interfaces.COMPLETED
-    True
-    >>> res4.state == zc.async.interfaces.COMPLETED
-    True
-    >>> res5.state == zc.async.interfaces.COMPLETED
-    True
-
-If you have multiple workers, it is strongly suggested that the associated
-servers synchronize their clocks with a shared time server.
-
-[#tear_down_datetime]_
-
-=========
-Footnotes
-=========
-
-.. [#setUp] We'll actually create the state that the text needs here.
-
-    >>> from ZODB.tests.util import DB
-    >>> db = DB()
-    >>> conn = db.open()
-    >>> root = conn.root()
-
-    You must have two adapter registrations: IConnection to
-    ITransactionManager, and IPersistent to IConnection.  We will also
-    register IPersistent to ITransactionManager because the adapter is
-    designed for it.
-
-    >>> from zc.twist import transactionManager, connection
-    >>> import zope.component
-    >>> zope.component.provideAdapter(transactionManager)
-    >>> zope.component.provideAdapter(connection)
-    >>> import ZODB.interfaces
-    >>> zope.component.provideAdapter(
-    ...     transactionManager, adapts=(ZODB.interfaces.IConnection,))
-
-    We need to be able to get data manager partials for functions and methods;
-    normal partials for functions and methods; and a data manager for a partial.
-    Here are the necessary registrations.
-
-    >>> import zope.component
-    >>> import types
-    >>> import zc.async.interfaces
-    >>> import zc.async.partial
-    >>> import zc.async.adapters
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.method_to_datamanagerpartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.function_to_datamanagerpartial)
-    >>> zope.component.provideAdapter( # partial -> datamanagerpartial
-    ...     zc.async.adapters.DataManagerPartial,
-    ...     provides=zc.async.interfaces.IDataManagerPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.adapters.partial_to_datamanager)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.FunctionType,),
-    ...     provides=zc.async.interfaces.IPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.MethodType,),
-    ...     provides=zc.async.interfaces.IPartial)
-    ...
-
-.. [#verify] Verify data manager interface.
-
-    >>> from zope.interface.verify import verifyObject
-    >>> verifyObject(zc.async.interfaces.IDataManager, dm)
-    True
-    >>> verifyObject(zc.async.interfaces.IPartialQueue, dm.thread)
-    True
-    >>> verifyObject(zc.async.interfaces.IPartialQueue, dm.reactor)
-    True
-    >>> verifyObject(zc.async.interfaces.IWorkers, dm.workers)
-    True
-
-.. [#check_workers_mapping]
-
-    >>> len(dm.workers)
-    1
-    >>> list(dm.workers.keys()) == [worker1.UUID]
-    True
-    >>> list(dm.workers) == [worker1.UUID]
-    True
-    >>> list(dm.workers.values()) == [worker1]
-    True
-    >>> list(dm.workers.items()) == [(worker1.UUID, worker1)]
-    True
-    >>> dm.workers.get(worker1.UUID) is worker1
-    True
-    >>> dm.workers.get(2) is None
-    True
-    >>> dm.workers[worker1.UUID] is worker1
-    True
-    >>> dm.workers[2]
-    Traceback (most recent call last):
-    ...
-    KeyError: 2
-
-.. [#check_UUID_equivalence] This is paranoid--it should be the responsibility
-    of the uuid.UUID class--but we'll check it anyway.
-
-     >>> equivalent_UUID = uuid.UUID(bytes=worker1.UUID.bytes)
-     >>> dm.workers[equivalent_UUID] is worker1
-     True
-     >>> dm.workers.remove(equivalent_UUID)
-     >>> len(dm.workers)
-     0
-     >>> res = dm.workers.add(worker1)
-
-.. [#verify_worker]
-
-    >>> verifyObject(zc.async.interfaces.IWorker, worker1)
-    True
-    >>> verifyObject(zc.async.interfaces.ISizedSequence, worker1.thread)
-    True
-    >>> verifyObject(zc.async.interfaces.ISizedSequence, worker1.reactor)
-    True
-    >>> isinstance(worker1.UUID, uuid.UUID)
-    True
-
-.. [#remove_partials] We can remove our partials from a worker with
-    `remove`.
-
-    >>> worker1.thread.remove(p1)
-    >>> len(worker1.thread)
-    1
-    >>> list(worker1.thread) == [p2]
-    True
-    >>> worker1.thread.remove(p2)
-    >>> len(worker1.thread)
-    0
-
-    The remove method of the worker thread and reactor sequences
-    raises ValueError if you ask to remove something that isn't
-    contained.
-
-    >>> worker1.thread.remove((2, 4)) # an iterable can surprise some
-    ... # naive string replacements, so we use this to verify we didn't
-    ... # fall into that trap.
-    Traceback (most recent call last):
-    ...
-    ValueError: (2, 4) not in queue
-
-.. [#setUp_UUID_utility] We need to provide an IUUID utility that
-    identifies the current instance.
-
-    >>> import uuid
-    >>> zope.interface.classImplements(uuid.UUID, zc.async.interfaces.IUUID)
-    >>> zope.component.provideUtility(
-    ...     worker1.UUID, zc.async.interfaces.IUUID, 'instance')
-
-    Normally this would be the UUID instance in zc.async.instanceuuid.
-
-    While we're at it, we'll get "now" so we can compare it in the footnote
-    below.
-
-    >>> import datetime
-    >>> import pytz
-    >>> _before = datetime.datetime.now(pytz.UTC)
-
-.. [#basic_data_manager_partial_checks] Even though the functionality checks
-    belong elsewhere, here are a few default checks for the values.
-
-    >>> verifyObject(zc.async.interfaces.IDataManagerPartial, p)
-    True
-    >>> p.workerUUID # None
-    >>> isinstance(p.assignerUUID, uuid.UUID)
-    True
-    >>> p.selectedUUIDs
-    zc.set.Set([])
-    >>> p.excludedUUIDs
-    zc.set.Set([])
-    >>> _before <= p.begin_after <= datetime.datetime.now(pytz.UTC)
-    True
-    >>> p.begin_by
-    datetime.timedelta(0, 3600)
-    >>> p.thread # None
-
-.. [#test_completed] Here are some nitty-gritty tests of the completed
-    container.
-
-    >>> verifyObject(zc.async.interfaces.ICompletedCollection,
-    ...              worker1.completed)
-    True
-    >>> bool(worker1.completed)
-    True
-    >>> len(worker2.completed)
-    0
-    >>> bool(worker2.completed)
-    False
-    >>> list(worker1.completed.iter()) == [p]
-    True
-    >>> list(worker1.completed.iter(p.begin_after)) == [p]
-    True
-    >>> list(worker1.completed.iter(
-    ...     p.begin_after - datetime.timedelta(seconds=1))) == []
-    True
-    >>> list(worker2.completed) == []
-    True
-    >>> list(worker2.completed.iter()) == []
-    True
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.last() is p
-    True
-    >>> worker2.completed.first()
-    Traceback (most recent call last):
-    ...
-    ValueError: None
-    >>> worker2.completed.last()
-    Traceback (most recent call last):
-    ...
-    ValueError: None
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first() is p
-    True
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.first()
-    Traceback (most recent call last):
-    ...
-    ValueError: None
-
-    Let's look at the completed collection with a few more partials in it.
-    The rotation means we can look at its behavior as the underlying buckets
-    are rotated out.
-
-    >>> root['coll_p0'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p1'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p2'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p3'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p4'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p5'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p6'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p7'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p8'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p9'] = zc.async.interfaces.IDataManagerPartial(send_message)
-    >>> root['coll_p0'].begin_after = datetime.datetime(
-    ...     2006, 1, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p1'].begin_after = datetime.datetime(
-    ...     2006, 2, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p2'].begin_after = datetime.datetime(
-    ...     2006, 3, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p3'].begin_after = datetime.datetime(
-    ...     2006, 4, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p4'].begin_after = datetime.datetime(
-    ...     2006, 5, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p5'].begin_after = datetime.datetime(
-    ...     2006, 6, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p6'].begin_after = datetime.datetime(
-    ...     2006, 7, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p7'].begin_after = datetime.datetime(
-    ...     2006, 8, 1, tzinfo=pytz.UTC)
-    >>> root['coll_p8'].begin_after = datetime.datetime(
-    ...     2006, 8, 2, tzinfo=pytz.UTC)
-    >>> root['coll_p9'].begin_after = datetime.datetime(
-    ...     2006, 8, 3, tzinfo=pytz.UTC)
-    >>> transaction.commit()
-    >>> worker1.completed.add(root['coll_p8'])
-    >>> worker1.completed.add(root['coll_p6'])
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.add(root['coll_p4'])
-    >>> worker1.completed.add(root['coll_p2'])
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.add(root['coll_p0'])
-    >>> worker1.completed.add(root['coll_p1'])
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.add(root['coll_p3'])
-    >>> worker1.completed.add(root['coll_p5'])
-    >>> worker1.completed.rotate()
-    >>> worker1.completed.add(root['coll_p7'])
-    >>> worker1.completed.add(root['coll_p9'])
-    >>> list(worker1.completed) == [
-    ...     root['coll_p9'], root['coll_p8'], root['coll_p7'], root['coll_p6'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> len(worker1.completed)
-    10
-
-    The `iter` method can simply work like __iter__, but it can also take
-    start and stop values for relatively efficient jumps.
-
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p8'], root['coll_p7'], root['coll_p6'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> list(worker1.completed.iter(start=datetime.datetime(
-    ...     2006, 7, 15, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p6'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> list(worker1.completed.iter(start=datetime.datetime(
-    ...     2006, 7, 1, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p6'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> list(worker1.completed.iter(stop=datetime.datetime(
-    ...     2006, 7, 15, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p9'], root['coll_p8'], root['coll_p7']]
-    True
-    >>> list(worker1.completed.iter(stop=datetime.datetime(
-    ...     2006, 7, 1, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p9'], root['coll_p8'], root['coll_p7']]
-    True
-    >>> list(worker1.completed.iter(stop=datetime.datetime(
-    ...     2006, 6, 30, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p9'], root['coll_p8'], root['coll_p7'], root['coll_p6']]
-    True
-    >>> list(worker1.completed.iter(start=datetime.datetime(
-    ...     2006, 7, 1, tzinfo=pytz.UTC), stop=datetime.datetime(
-    ...     2006, 3, 1, tzinfo=pytz.UTC))) == [
-    ...     root['coll_p6'], root['coll_p5'], root['coll_p4'], root['coll_p3']]
-    True
-    
-    `first` and `last` give you the newest item at or before a given start
-    point, and the oldest item at or after a given stop point, respectively.
-    
-    >>> worker1.completed.first() == root['coll_p9']
-    True
-    >>> worker1.completed.last() == root['coll_p0']
-    True
-    >>> worker1.completed.first(
-    ...     datetime.datetime(2006, 7, 15, tzinfo=pytz.UTC)) == (
-    ...     root['coll_p6'])
-    True
-    >>> worker1.completed.last(
-    ...     datetime.datetime(2006, 7, 15, tzinfo=pytz.UTC)) == (
-    ...     root['coll_p7'])
-    True
-
-    As you rotate the completed container, older items disappear.
-
-    >>> worker1.completed.rotate()
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p7'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> worker1.completed.rotate() # no change
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p7'],
-    ...     root['coll_p5'], root['coll_p4'], root['coll_p3'], root['coll_p2'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> worker1.completed.rotate()
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p7'],
-    ...     root['coll_p5'], root['coll_p3'],
-    ...     root['coll_p1'], root['coll_p0']]
-    True
-    >>> worker1.completed.rotate()
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p7'],
-    ...     root['coll_p5'], root['coll_p3']]
-    True
-    >>> worker1.completed.rotate()
-    >>> list(worker1.completed.iter()) == [
-    ...     root['coll_p9'], root['coll_p7']]
-    True
-    >>> worker1.completed.rotate()
-    >>> list(worker1.completed.iter()) == []
-    True
-    >>> transaction.commit()
-
-.. [#set_up_reactor] We monkeypatch twisted.internet.reactor
-    (and restore it below).
-
-    >>> import twisted.internet.reactor
-    >>> oldCallLater = twisted.internet.reactor.callLater
-    >>> import bisect
-    >>> class FauxReactor(object):
-    ...     def __init__(self):
-    ...         self.time = 0
-    ...         self.calls = []
-    ...     def callLater(self, delay, callable, *args, **kw):
-    ...         res = (delay + self.time, callable, args, kw)
-    ...         bisect.insort(self.calls, res)
-    ...         # callLater normally returns an IDelayedCall; not needed here
-    ...     def time_flies(self, time):
-    ...         end = self.time + time
-    ...         ct = 0
-    ...         while self.calls and self.calls[0][0] <= end:
-    ...             self.time, callable, args, kw = self.calls.pop(0)
-    ...             callable(*args, **kw) # normally this would get try...except
-    ...             ct += 1
-    ...         self.time = end
-    ...         return ct
-    ...     def time_passes(self):
-    ...         if self.calls and self.calls[0][0] <= self.time:
-    ...             self.time, callable, args, kw = self.calls.pop(0)
-    ...             callable(*args, **kw)
-    ...             return True
-    ...         return False
-    ...
-    >>> faux = FauxReactor()
-    >>> twisted.internet.reactor.callLater = faux.callLater
-    >>> time_flies = faux.time_flies
-    >>> time_passes = faux.time_passes
-
-.. [#tear_down_reactor]
-
-    >>> twisted.internet.reactor.callLater = oldCallLater
-
-.. [#set_up_datetime] A monkeypatch, removed in another footnote below.
-
-    >>> import datetime
-    >>> import pytz
-    >>> old_datetime = datetime.datetime
-    >>> def set_now(dt):
-    ...     global _now
-    ...     _now = _datetime(*dt.__reduce__()[1])
-    ...
-    >>> class _datetime(old_datetime):
-    ...     @classmethod
-    ...     def now(klass, tzinfo=None):
-    ...         if tzinfo is None:
-    ...             return _now.replace(tzinfo=None)
-    ...         else:
-    ...             return _now.astimezone(tzinfo)
-    ...     def astimezone(self, tzinfo):
-    ...         return _datetime(
-    ...             *super(_datetime,self).astimezone(tzinfo).__reduce__()[1])
-    ...     def replace(self, *args, **kwargs):
-    ...         return _datetime(
-    ...             *super(_datetime,self).replace(
-    ...                 *args, **kwargs).__reduce__()[1])
-    ...     def __repr__(self):
-    ...         raw = super(_datetime, self).__repr__()
-    ...         return "datetime.datetime%s" % (
-    ...             raw[raw.index('('):],)
-    ...     def __reduce__(self):
-    ...         return (argh, super(_datetime, self).__reduce__()[1])
-    >>> def argh(*args, **kwargs):
-    ...     return _datetime(*args, **kwargs)
-    ...
-    >>> datetime.datetime = _datetime
-    >>> _now = datetime.datetime(2006, 8, 10, 15, 44, 22, 211, pytz.UTC)
-
-.. [#reinstate]
-
-    >>> for p in (res1, res2, res3, res4, res5):
-    ...     p.assignerUUID = None
-    ...     res = dm.thread.put(p)
-    ...
-    >>> list(dm.thread) == [res3, res1, res2, res5, res4]
-    True
-    >>> list(dm.thread.iterDue()) == [res3, res1, res2, res5, res4]
-    True
-
-.. [#tear_down_datetime]
-
-    >>> datetime.datetime = old_datetime

Copied: zc.async/trunk/src/zc/async/dispatcher.py (from rev 85211, zc.async/branches/dev/src/zc/async/dispatcher.py)
===================================================================
--- zc.async/trunk/src/zc/async/dispatcher.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/dispatcher.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,716 @@
+import time
+import datetime
+import bisect
+import Queue
+import thread
+import threading
+
+import twisted.python.failure
+import twisted.internet.defer
+import ZODB.POSException
+import BTrees
+import transaction
+import transaction.interfaces
+import zope.component
+import zope.bforest.periodic
+import zc.twist
+
+import zc.async.utils
+import zc.async.interfaces
+
+def _get(reactor, job, name, default, timeout, poll, deferred, start=None):
+    now = time.time()
+    if start is None:
+        start = now
+    if name in job.annotations:
+        res = job.annotations[name]
+    elif start + timeout < now:
+        res = default
+    else:
+        partial = zc.twist.Partial(
+            _get, reactor, job, name, default, timeout, poll, deferred,
+            start)
+        partial.setReactor(reactor)
+        reactor.callLater(min(poll, start + timeout - now), partial)
+        return
+    deferred.setResult(res)
+
+class Result(object):
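+    # A minimal thread-safe result holder: the reactor thread delivers a
+    # value via ``setResult`` while a worker thread blocks in ``wait``
+    # until the value arrives (or the optional timeout expires).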
+
+    result = None
+
+    def __init__(self):
+        self._event = threading.Event()
+    
+    def setResult(self, value):
+        self.result = value
+        self._event.set()
+
+    def wait(self, *args):
+        self._event.wait(*args)
+
+class Local(threading.local):
+
+    job = None
+    dispatcher = None
+
+    def getJob(self):
+        return self.job
+
+    def getDispatcher(self):
+        return self.dispatcher
+
+    def getReactor(self):
+        return self.dispatcher.reactor
+
+    def setLiveAnnotation(self, name, value, job=None):
+        if self.job is None or self.dispatcher.reactor is None:
+            raise ValueError('not initialized')
+        if job is None:
+            job = self.job
+        partial = zc.twist.Partial(
+            job.annotations.__setitem__, name, value)
+        partial.setReactor(self.dispatcher.reactor)
+        self.dispatcher.reactor.callFromThread(partial)
+
+    def getLiveAnnotation(self, name, default=None, timeout=0,
+                          poll=1, job=None):
+        if self.job is None or self.dispatcher.reactor is None:
+            raise ValueError('not initialized')
+        if job is None:
+            job = self.job
+        deferred = Result()
+        partial = zc.twist.Partial(
+            _get, self.dispatcher.reactor, job, name, default, timeout, poll,
+            deferred)
+        partial.setReactor(self.dispatcher.reactor)
+        self.dispatcher.reactor.callFromThread(partial)
+        deferred.wait(timeout+2)
+        return deferred.result
+
+local = Local()
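+
+# Job code running in a dispatcher thread can communicate through ``local``.
+# An illustrative (hypothetical) example:
+#
+#     import zc.async.dispatcher
+#     def my_job():
+#         job = zc.async.dispatcher.local.getJob()
+#         zc.async.dispatcher.local.setLiveAnnotation('status', 'working')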
+
+
+class PollInfo(dict):
+    key = None
+    @property
+    def utc_timestamp(self):
+        if self.key is not None:
+            return zc.async.utils.long_to_dt(self.key)
+
+
+class AgentThreadPool(object):
+
+    _size = 0
+
+    def __init__(self, dispatcher, name, size):
+        self.dispatcher = dispatcher
+        self.name = name
+        self.queue = Queue.Queue(0)
+        self._threads = []
+        self.setSize(size)
+
+    def getSize(self):
+        return self._size
+
+    def perform_thread(self):
+        local.dispatcher = self.dispatcher
+        try:
+            job = self.queue.get()
+            while job is not None:
+                db, identifier, info = job
+                info['thread'] = thread.get_ident()
+                info['started'] = datetime.datetime.utcnow()
+                zc.async.utils.tracelog.info(
+                    'starting in thread %d: %r',
+                    info['thread'], info['call'])
+                conn = db.open()
+                try:
+                    transaction.begin()
+                    job = conn.get(identifier)
+                    local.job = job
+                    try:
+                        job() # this does the committing and retrying, largely
+                    except ZODB.POSException.TransactionError:
+                        transaction.abort()
+                        while 1:
+                            try:
+                                job.fail()
+                                transaction.commit()
+                            except ZODB.POSException.TransactionError:
+                                transaction.abort() # retry forever (!)
+                            else:
+                                break
+                    # should come before 'completed' for threading dance
+                    if isinstance(job.result, twisted.python.failure.Failure):
+                        info['failed'] = True
+                        info['result'] = job.result.getTraceback(
+                            elideFrameworkCode=True, detail='verbose')
+                    else:
+                        info['result'] = repr(job.result)
+                    info['completed'] = datetime.datetime.utcnow()
+                finally:
+                    local.job = None
+                    transaction.abort()
+                    conn.close()
+                if info['failed']:
+                    log = zc.async.utils.tracelog.error
+                else:
+                    log = zc.async.utils.tracelog.info
+                log(
+                    '%s %s in thread %d with result:\n%s',
+                    info['call'],
+                    info['failed'] and 'failed' or 'succeeded',
+                    info['thread'], info['result'])
+                job = self.queue.get()
+        finally:
+            if self.dispatcher.activated:
+                # this may cause some bouncing, but we don't ever want to end
+                # up with fewer than needed.
+                self.dispatcher.reactor.callFromThread(self.setSize)
+    
+    def setSize(self, size=None):
+        # this should only be called from the thread in which the reactor runs
+        # (otherwise it needs locks)
+        old = self._size
+        if size is None:
+            size = old
+        else:
+            self._size = size
+        res = []
+        ct = 0
+        for t in self._threads:
+            if t.isAlive():
+                res.append(t)
+                ct += 1
+        self._threads[:] = res
+        if ct < size:
+            for i in range(max(size - ct, 0)):
+                t = threading.Thread(target=self.perform_thread)
+                t.setDaemon(True)
+                self._threads.append(t)
+                t.start()
+        elif ct > size:
+            # this may cause some bouncing, but hopefully nothing too bad.
+            for i in range(ct - size):
+                self.queue.put(None)
+        return size - old # size difference
+
+# this is mostly for testing
+
+_dispatchers = {}
+
+def get(uuid=None, default=None):
+    if uuid is None:
+        uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
+    return _dispatchers.get(uuid, default)
+
+def pop(uuid=None):
+    if uuid is None:
+        uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
+    return _dispatchers.pop(uuid)
+
+clear = _dispatchers.clear
+
+class Dispatcher(object):
+
+    activated = False
+    conn = None
+
+    def __init__(self, db, reactor, poll_interval=5, uuid=None):
+        if uuid is None:
+            uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
+        if uuid in _dispatchers:
+            raise ValueError('dispatcher for this UUID is already registered')
+        _dispatchers[uuid] = self
+        self.db = db
+        self.reactor = reactor # we may allow the ``reactor`` argument to be
+        # None at some point, to default to the installed Twisted reactor.
+        self.poll_interval = poll_interval
+        self.UUID = uuid
+        # we keep these small so that memory usage doesn't balloon too big.
+        # for polls: at 5 seconds a poll, 10 minutes of history is 12
+        # polls/minute, or 120 polls; at a fairly large poll size of maybe
+        # 300 bytes each, 120 * 300 == 36000, about 36K.  Not too bad.
+        # Jobs can take much more
+        # memory depending on the result--a failure takes a lot of memory, for
+        # instance--and there's no real way to guess how many we would get in
+        # a given period of time.  With a wild guess of an average of a K per
+        # job, and storage of 20 minutes, we would get 240K for 12 jobs a
+        # minute, or 1.2M for a job a second, and so on.  That's much bigger,
+        # but we still have a long way to go before we have noticeable memory
+        # consumption on typical production machines.
+        # We keep jobs longer than polls because you may want to find out
+        # about active jobs in a given poll, and jobs will begin their
+        # timeout period when they are begun, so we give a bit of cushion.
+        self.polls = zc.async.utils.Periodic(
+            period=datetime.timedelta(minutes=10), buckets=5) # max of 12.5 min
+        self.jobs = zope.bforest.periodic.OOBForest(
+            period=datetime.timedelta(minutes=20), count=9) # max of 22.5 min
+        self._activated = set()
+        self.queues = {}
+        self.dead_pools = []
+
+    def _getJob(self, agent):
+        try:
+            job = agent.claimJob()
+        except zc.twist.EXPLOSIVE_ERRORS:
+            transaction.abort()
+            raise
+        except:
+            transaction.abort()
+            zc.async.utils.log.error(
+                'Error trying to get job for UUID %s from '
+                'agent %s (oid %s) in queue %s (oid %s)', 
+                self.UUID, agent.name, agent._p_oid,
+                agent.queue.name,
+                agent.queue._p_oid, exc_info=True)
+            return zc.twist.Failure()
+        res = self._commit(
+            'Error trying to commit getting a job for UUID %s from '
+            'agent %s (oid %s) in queue %s (oid %s)' % (
+            self.UUID, agent.name, agent._p_oid,
+            agent.queue.name,
+            agent.queue._p_oid))
+        if res is None:
+            # Successful commit
+            res = job
+        return res
+
+    def _commit(self, debug_string=''):
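+        # returns None on a successful commit; on repeated conflicts or
+        # unexpected errors it aborts and returns a zc.twist.Failure
+        # (explosive errors are re-raised).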
+        retry = 0
+        while 1:
+            try:
+                transaction.commit()
+            except ZODB.POSException.TransactionError:
+                transaction.abort()
+                if retry >= 5:
+                    zc.async.utils.log.error(
+                        'Repeated transaction error trying to commit in '
+                        'zc.async: %s', 
+                        debug_string, exc_info=True)
+                    return zc.twist.Failure()
+                retry += 1
+            except zc.twist.EXPLOSIVE_ERRORS:
+                transaction.abort()
+                raise
+            except:
+                transaction.abort()
+                zc.async.utils.log.error(
+                    'Error trying to commit: %s', 
+                    debug_string, exc_info=True)
+                return zc.twist.Failure()
+            else:
+                break
+
+    def poll(self):
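+        # one poll: make sure this dispatcher is activated in every queue,
+        # size each agent's thread pool to match the agent, claim as many
+        # jobs as the agents offer, hand them to the pools, ping, and prune
+        # pools for queues or agents that have gone away.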
+        poll_info = PollInfo()
+        started_jobs = []
+        transaction.begin() # sync and clear
+        try:
+            queues = self.conn.root().get(zc.async.interfaces.KEY)
+            if queues is None:
+                transaction.abort()
+                return
+            for queue in queues.values():
+                poll_info[queue.name] = None
+                if self.UUID not in queue.dispatchers:
+                    queue.dispatchers.register(self.UUID)
+                da = queue.dispatchers[self.UUID]
+                if queue._p_oid not in self._activated:
+                    if da.activated:
+                        if da.dead:
+                            da.deactivate()
+                        else:
+                            zc.async.utils.log.error(
+                                'UUID %s already activated in queue %s '
+                                '(oid %s): another process?  To stop '
+                                'poll attempts in this process, set '
+                                '``zc.async.dispatcher.get().activated = '
+                                "False``.  To stop polls permanently, don't "
+                                'start a zc.async.dispatcher!',
+                                self.UUID, queue.name, queue._p_oid)
+                            continue
+                    da.activate()
+                    self._activated.add(queue._p_oid)
+                    # removed below if transaction fails
+                    res = self._commit(
+                        'Error trying to commit activation of UUID %s in '
+                        'queue %s (oid %s)' % (
+                            self.UUID, queue.name, queue._p_oid))
+                    if res is not None:
+                        self._activated.remove(queue._p_oid)
+                        continue
+                queue_info = poll_info[queue.name] = {}
+                pools = self.queues.get(queue.name)
+                if pools is None:
+                    pools = self.queues[queue.name] = {}
+                for name, agent in da.items():
+                    job_info = []
+                    active_jobs = [
+                        (job._p_oid,
+                         getattr(job._p_jar.db(), 'database_name', None))
+                         for job in agent]
+                    agent_info = queue_info[name] = {
+                        'size': None, 'len': None, 'error': None,
+                        'new jobs': job_info, 'active jobs': active_jobs}
+                    try:
+                        agent_info['size'] = agent.size
+                        agent_info['len'] = len(agent)
+                    except zc.twist.EXPLOSIVE_ERRORS:
+                        raise
+                    except:
+                        agent_info['error'] = zc.twist.Failure()
+                        transaction.abort()
+                        continue
+                    pool = pools.get(name)
+                    if pool is None:
+                        pool = pools[name] = AgentThreadPool(
+                            self, name, agent_info['size'])
+                        conn_delta = agent_info['size']
+                    else:
+                        conn_delta = pool.setSize(agent_info['size'])
+                    if conn_delta:
+                        db = queues._p_jar.db()
+                        db.setPoolSize(db.getPoolSize() + conn_delta)
+                    job = self._getJob(agent)
+                    while job is not None:
+                        if isinstance(job, twisted.python.failure.Failure):
+                            agent_info['error'] = job
+                            job = None
+                            try:
+                                agent.failure = agent_info['error']
+                            except zc.twist.EXPLOSIVE_ERRORS:
+                                raise
+                            except:
+                                transaction.abort()
+                                zc.async.utils.log.error(
+                                    'error trying to stash failure on agent')
+                            else:
+                                # TODO improve msg
+                                self._commit('trying to stash failure on agent')
+                        else:
+                            info = {'result': None,
+                                    'failed': False,
+                                    'poll id': None,
+                                    'quota names': job.quota_names,
+                                    'call': repr(job),
+                                    'started': None,
+                                    'completed': None,
+                                    'thread': None}
+                            started_jobs.append(info)
+                            dbname = getattr(
+                                job._p_jar.db(), 'database_name', None)
+                            jobid = (job._p_oid, dbname)
+                            self.jobs[jobid] = info
+                            job_info.append(jobid)
+                            pool.queue.put(
+                                (job._p_jar.db(), job._p_oid, info))
+                            job = self._getJob(agent)
+                queue.dispatchers.ping(self.UUID)
+                self._commit('trying to commit ping')
+                if len(pools) > len(queue_info):
+                    conn_delta = 0
+                    for name, pool in pools.items():
+                        if name not in queue_info:
+                            conn_delta += pool.setSize(0)
+                            self.dead_pools.append(pools.pop(name))
+                    if conn_delta:
+                        db = queues._p_jar.db()
+                        # this is a bit premature--it should really happen
+                        # when all threads are complete--but since the pool just
+                        # complains if the size is not honored, and this approach
+                        # is easier, we're doing this.
+                        db.setPoolSize(db.getPoolSize() + conn_delta)
+            if len(self.queues) > len(poll_info):
+                conn_delta = 0
+                for queue_name, queue_pools in self.queues.items():
+                    if queue_name not in poll_info:
+                        for name, pool in queue_pools.items():
+                            conn_delta += pool.setSize(0)
+                            self.dead_pools.append(queue_pools.pop(name))
+                if conn_delta:
+                    # this is a bit premature--it should really happen
+                    # when all threads are complete--but since the pool just
+                    # complains if the size is not honored, and this approach
+                    # is easier, we're doing this.
+                    self.db.setPoolSize(self.db.getPoolSize() + conn_delta)
+        finally:
+            transaction.abort()
+            try:
+                last = self.polls.first()
+            except ValueError:
+                last = None
+            self.polls.add(poll_info)
+            for info in started_jobs:
+                info['poll id'] = poll_info.key
+            if last is None or last != poll_info:
+                zc.async.utils.tracelog.debug(
+                    'poll %s: %r', poll_info.key, poll_info)
+
+    def directPoll(self):
+        if not self.activated:
+            return
+        try:
+            self.poll()
+        finally:
+            self.reactor.callLater(self.poll_interval, self.directPoll)
+
+    def _inThreadPoll(self, deferred):
+        try:
+            self.poll()
+        finally:
+            self.reactor.callFromThread(deferred.callback, None)
+
+    def threadedPoll(self):
+        if not self.activated:
+            return
+        deferred = twisted.internet.defer.Deferred()
+        self.reactor.callInThread(self._inThreadPoll, deferred)
+        deferred.addCallback(
+            lambda result: self.reactor.callLater(
+                self.poll_interval, self.threadedPoll))
+
+    def activate(self, threaded=False):
+        if self.activated:
+            raise ValueError('already activated')
+        zc.async.utils.log.info('attempting to activate dispatcher %s',
+                                self.UUID)
+        self.activated = datetime.datetime.utcnow()
+        # in case this is a restart, we clear old data
+        self.polls.clear()
+        self.jobs.clear()
+        # increase pool size to account for the dispatcher poll
+        self.db.setPoolSize(self.db.getPoolSize() + 1)
+        self.conn = self.db.open() # we keep the same connection for all
+        # polls as an optimization
+        if threaded:
+            self.reactor.callWhenRunning(self.threadedPoll)
+        else:
+            self.reactor.callWhenRunning(self.directPoll)
+        self.reactor.addSystemEventTrigger(
+            'before', 'shutdown', self.deactivate)
+
+    def deactivate(self):
+        if not self.activated:
+            raise ValueError('not activated')
+        self.activated = False
+        transaction.begin()
+        try:
+            queues = self.conn.root().get(zc.async.interfaces.KEY)
+            if queues is not None:
+                for queue in queues.values():
+                    da = queue.dispatchers.get(self.UUID)
+                    if da is not None and da.activated:
+                        da.deactivate()
+                self._commit('trying to tear down')
+        finally:
+            transaction.abort()
+            self.conn.close()
+        conn_delta = 0
+        for queue_pools in self.queues.values():
+            for name, pool in queue_pools.items():
+                conn_delta += pool.setSize(0)
+                self.dead_pools.append(queue_pools.pop(name))
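+        # give back the extra connection that activate() added for the poll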
+        conn_delta -= 1
+        self.db.setPoolSize(self.db.getPoolSize() + conn_delta)
+        zc.async.utils.log.info('deactivated dispatcher %s',
+                                self.UUID)
+
+    # these methods are used for monitoring and analysis
+
+    STOPPED = 'STOPPED'
+    RUNNING = 'RUNNING'
+    STUCK = 'STUCK'
+    STARTING = 'STARTING'
+
+    def getStatusInfo(self):
+        res = {'time since last poll': None, 'uptime': None, 'uuid': self.UUID}
+        poll_interval = res['poll interval'] = datetime.timedelta(
+                    seconds=self.poll_interval)
+        if not self.activated:
+            res['status'] = self.STOPPED
+        else:
+            now = datetime.datetime.utcnow()
+            try:
+                poll = self.polls.first()
+            except ValueError:
+                # no polls
+                next = self.activated + poll_interval
+                if next < now:
+                    res['status'] = self.STUCK
+                else:
+                    res['status'] = self.STARTING
+                res['time since last poll'] = now - self.activated
+            else:
+                next = poll.utc_timestamp + poll_interval
+                if next < now:
+                    res['status'] = self.STUCK
+                else:
+                    res['status'] = self.RUNNING
+                res['time since last poll'] = now - poll.utc_timestamp
+                res['uptime'] = now - self.activated
+        return res
+
+    def getJobInfo(self, oid, database_name=None):
+        if database_name is None:
+            # these will raise ValueErrors for unknown oids.  We'll let 'em.
+            minKey = self.jobs.minKey((oid,))
+            maxKey = self.jobs.maxKey((oid,))
+            if minKey != maxKey:
+                raise ValueError('ambiguous database name')
+            else:
+                database_name = minKey[1]
+        return self.jobs[(oid, database_name)]
+
+    def getActiveJobIds(self, queue=None, agent=None):
+        """returns active jobs from newest to oldest"""
+        res = []
+        try:
+            poll = self.polls.first()
+        except ValueError:
+            pass
+        else:
+            old = []
+            unknown = []
+            for info in _iter_info(poll, queue, agent):
+                res.extend(info['new jobs'])
+                for job_id in info['active jobs']:
+                    job_info = self.jobs.get(job_id)
+                    if job_info is None:
+                        unknown.append(job_id)
+                    else:
+                        bisect.insort(old, (job_info['poll id'], job_id))
+            res.extend(i[1] for i in old)
+            res.extend(unknown)
+        return res
+
+    def getPollInfo(self, at=None, before=None):
+        if at is not None:
+            if before is not None:
+                raise ValueError('may only provide one of `at` and `before`')
+            if isinstance(at, datetime.datetime):
+                at = zc.async.utils.dt_to_long(at)
+        elif before is not None:
+            if isinstance(before, datetime.datetime):
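+                # poll keys are derived from timestamps with the low 4 bits
+                # used as a counter, so adding 16 moves just past every key
+                # stamped with this datetime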
+                at = zc.async.utils.dt_to_long(before) + 16
+            else:
+                at = before + 1
+        for bucket in tuple(self.polls._data.buckets): # freeze order
+            try:
+                if at is None:
+                    key = bucket.minKey()
+                else:
+                    key = bucket.minKey(at)
+                return bucket[key]
+            except (ValueError, KeyError):
+                # ValueError because minKey might not have a value
+                # KeyError because bucket might be cleared in another thread
+                # between minKey and __getitem__
+                pass
+        raise ValueError('no poll matches')
+
+    def iterPolls(self, at=None, before=None, since=None, count=None):
+        # `polls` may be mutated during iteration so we don't iterate over it
+        if at is not None and before is not None:
+            raise ValueError('may only provide one of `at` and `before`')
+        if isinstance(since, datetime.datetime):
+            since = zc.async.utils.dt_to_long(since) + 15
+        ct = 0
+        while 1:
+            if count is not None and ct >= count:
+                break
+            try:
+                info = self.getPollInfo(at=at, before=before)
+            except ValueError:
+                break
+            else:
+                if since is None or before <= since:
+                    yield info
+                    ct += 1
+                    before = info.key
+                    at = None
+                else:
+                    break
+
+    def getStatistics(self, at=None, before=None, since=None, queue=None,
+                      agent=None):
+        if at is not None and before is not None:
+            raise ValueError('may only provide one of `at` and `before`')
+        res = {
+            'started': 0,
+            'successful': 0,
+            'failed': 0,
+            'unknown': 0
+            }
+        _pair = (None, None)
+        successful_extremes = [_pair, _pair]
+        failed_extremes = [_pair, _pair]
+        active_extremes = [_pair, _pair]
+        now = datetime.datetime.utcnow()
+        first = True
+        poll = first_poll = None
+        def process(jobs):
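+            # tally outcome counts and track the shortest and longest
+            # durations seen so far for successful, failed, and active jobs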
+            for jobid in jobs:
+                jobinfo = self.jobs.get(jobid)
+                if jobinfo is None:
+                    res['unknown'] += 1
+                    continue
+                if jobinfo['completed']:
+                    if jobinfo['failed']:
+                        pair = failed_extremes
+                        res['failed'] += 1
+                    else:
+                        pair = successful_extremes
+                        res['successful'] += 1
+                else:
+                    pair = active_extremes
+                start = jobinfo['started'] or poll_time
+                stop = jobinfo['completed'] or now
+                duration = stop - start
+                if pair[0][0] is None or pair[0][0] > duration:
+                    pair[0] = (duration, jobid)
+                if pair[1][0] is None or pair[1][0] < duration:
+                    pair[1] = (duration, jobid)
+        for poll in self.iterPolls(at=at, before=before, since=since):
+            poll_time = poll.utc_timestamp
+            for agent_info in _iter_info(poll, queue, agent):
+                res['started'] += len(agent_info['new jobs'])
+                process(agent_info['new jobs'])
+            if first:
+                first = False
+                first_poll = poll
+        if poll is not None:
+            for agent_info in _iter_info(poll, queue, agent):
+                process(agent_info['active jobs'])
+        if first_poll is not None:
+            # iterPolls goes from newest to oldest, so `poll` (the last one
+            # iterated) is the oldest and `first_poll` is the newest
+            stat_start = poll.utc_timestamp
+            stat_end = first_poll.utc_timestamp
+        else:
+            stat_start = None
+            stat_end = None
+        res.update({
+            'shortest successful': successful_extremes[0][1],
+            'longest successful': successful_extremes[1][1],
+            'shortest failed': failed_extremes[0][1],
+            'longest failed': failed_extremes[1][1],
+            'shortest active': active_extremes[0][1],
+            'longest active': active_extremes[1][1],
+            'statistics start': stat_start,
+            'statistics end': stat_end,
+            })
+        return res
+
+def _iter_info(poll, queue, agent):
+    if queue is None:
+        queues = poll.values()
+    elif queue not in poll:
+        queues = []
+    else:
+        queues = [poll[queue]]
+    for q in queues:
+        if agent is None:
+            for i in q.values():
+                yield i
+        elif agent in q:
+            yield q[agent]

Copied: zc.async/trunk/src/zc/async/dispatcher.txt (from rev 85211, zc.async/branches/dev/src/zc/async/dispatcher.txt)
===================================================================
--- zc.async/trunk/src/zc/async/dispatcher.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/dispatcher.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,501 @@
+The dispatcher is the code responsible for actually performing the
+zc.async jobs.
+
+Typically, the dispatcher is a singleton for a given process.  Multiple
+dispatchers can be created at once, primarily for ease of testing.
+
+Dispatchers expect to get a "reactor" to power the timed calls.  This reactor
+must have several methods:
+
+class IReactor(zope.interface.Interface):
+    
+    def callFromThread(callable, *args, **kw):
+        """have callable run in reactor's thread, by reactor, ASAP.
+        
+        Intended to be called from a thread other than the reactor's main
+        loop.
+        """
+    
+    def callInThread(callable, *args, **kw):
+        """have callable run in a separate thread, ASAP.
+        
+        Must be called in same thread as reactor's main loop.
+        """
+    
+    def callLater(seconds, callable, *args, **kw):
+        """have callable run in reactor at least <seconds> from now
+        
+        Must be called in same thread as reactor's main loop.
+        """
+
+    def addSystemEventTrigger(phase, event, callable, *args, **kw):
+        """Install a callable to be run in phase of event.
+        
+        must support phase 'before', and event 'shutdown'.
+        """
+
+    def callWhenRunning(_callable, *args, **kw):
+        """run callable now if running, or when started.
+        """
+
+The twisted reactors provide these necessary methods.  You can also write
+your own reactor.
+
+For this example, we will use the twisted select reactor running in another
+thread.  Other doctests in this package use a test reactor defined in the
+zc.async.testing module, as an example of a write-your-own reactor.
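+
+For illustration only, here is a minimal sketch of what such a
+write-your-own reactor could look like.  This is *not* the
+``zc.async.testing`` implementation: it ignores requested delays and exists
+just to show the required surface area.
+
+    import threading
+
+    class TrivialReactor(object):
+
+        def __init__(self):
+            self.pending = []   # callables waiting to be run by the "loop"
+            self.triggers = []  # ('before', 'shutdown') callables
+            self.running = False
+
+        def callFromThread(self, callable, *args, **kw):
+            # a real reactor hands this to its own loop; we just queue it
+            self.pending.append((callable, args, kw))
+
+        def callInThread(self, callable, *args, **kw):
+            threading.Thread(target=callable, args=args, kwargs=kw).start()
+
+        def callLater(self, seconds, callable, *args, **kw):
+            self.pending.append((callable, args, kw))  # delay not honored
+
+        def addSystemEventTrigger(self, phase, event, callable, *args, **kw):
+            assert (phase, event) == ('before', 'shutdown')
+            self.triggers.append((callable, args, kw))
+
+        def callWhenRunning(self, _callable, *args, **kw):
+            if self.running:
+                _callable(*args, **kw)
+            else:
+                self.pending.append((_callable, args, kw))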
+
+To instantiate the dispatcher, we need a reactor and a db.  We'll create it,
+then start it off in the thread.  We'll assume we already have the db and the
+necessary adapter registrations [#setUp]_.
+
+Here's the reactor.  The ``_handleSignals`` call just lets the reactor install
+its default signal handlers.  In most real-world usage you'll need to be more
+careful, hooking into the signal handling of your larger app.  Look at the
+code in ``zc.async.subscribers.ThreadedDispatcherInstaller`` for an example
+that should be sufficient for Zope.
+
+    >>> import twisted.internet.selectreactor
+    >>> reactor = twisted.internet.selectreactor.SelectReactor()
+    >>> reactor._handleSignals()
+
+Now here's our dispatcher.  The poll_interval is a number of seconds (float or
+int).  It defaults to 5 but we're setting it to 0.5 in order to make this run
+faster when this document is run as a test.
+
+    >>> import zc.async.dispatcher
+    >>> dispatcher = zc.async.dispatcher.Dispatcher(
+    ...     db, reactor, poll_interval=0.5)
+    >>> import zc.async.instanceuuid
+    >>> dispatcher.UUID is zc.async.instanceuuid.UUID
+    True
+    >>> dispatcher.reactor is reactor
+    True
+    >>> dispatcher.db is db
+    True
+    >>> dispatcher.poll_interval
+    0.5
+
+Now we're ready to start up.  Notice the current size of the db's connection
+pool.  When we activate the dispatcher we'll see that this automatically ups
+the connection pool for the db by 1, to include the dispatcher's poll.
+
+    >>> db.getPoolSize()
+    7
+
+    >>> import threading
+    >>> def start():
+    ...     dispatcher.activate()
+    ...     reactor.run(installSignalHandlers=0)
+    ...
+    >>> thread = threading.Thread(target=start)
+    >>> thread.setDaemon(True)
+    >>> import datetime
+    >>> initial = datetime.datetime.utcnow()
+    >>> thread.start()
+
+The dispatcher should be starting up now.  Let's wait for it to activate.
+We're using a test convenience, get_poll, defined in the footnotes
+[#get_poll]_.
+
+    >>> poll = get_poll(0)
+    >>> poll == {}
+    True
+    >>> initial <= poll.utc_timestamp <= datetime.datetime.utcnow()
+    True
+
+Now the pool size has increased, as we mentioned above.
+
+    >>> db.getPoolSize()
+    8
+
+Now let's add a queues folder and a queue.  The queues folder is always
+expected to be stored under the zc.async.interfaces.KEY key of the database
+root.
+
+    >>> import zc.async.queue
+    >>> import zc.async.interfaces
+    >>> container = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
+    >>> queue = container[''] = zc.async.queue.Queue()
+    >>> import transaction
+    >>> transaction.commit()
+
+Now the next poll will register and activate the dispatcher in the queue.
+
+    >>> poll = get_poll()
+
+This accomplished the following things.
+
+- The dispatcher registered and activated itself with the queue.
+
+    >>> t = transaction.begin() # sync
+    >>> list(queue.dispatchers) == [dispatcher.UUID]
+    True
+    >>> list(queue.dispatchers[dispatcher.UUID]) # these would be agent names
+    []
+    >>> queue.dispatchers[dispatcher.UUID].UUID == dispatcher.UUID
+    True
+    >>> bool(queue.dispatchers[dispatcher.UUID].activated)
+    True
+
+- The queue fired events to announce the dispatcher's registration and
+  activation.  We could have registered subscribers for either or both
+  of these events to create agents.
+  
+  Note that the dispatcher in queue.dispatchers is a persistent
+  representative of the actual dispatcher: they are different objects.
+
+    >>> from zope.component import eventtesting
+    >>> import zc.async.interfaces
+    >>> evs = eventtesting.getEvents(
+    ...     zc.async.interfaces.IDispatcherRegistered)
+    >>> evs # doctest: +ELLIPSIS
+    [<zc.async.interfaces.DispatcherRegistered object at ...>]
+    >>> evs[0].object._p_oid == queue.dispatchers[dispatcher.UUID]._p_oid
+    True
+
+    >>> evs = eventtesting.getEvents(
+    ...     zc.async.interfaces.IDispatcherActivated)
+    >>> evs # doctest: +ELLIPSIS
+    [<zc.async.interfaces.DispatcherActivated object at ...>]
+    >>> evs[0].object._p_oid == queue.dispatchers[dispatcher.UUID]._p_oid
+    True
+
+- The dispatcher made its first ping.  A ping simply means that the dispatcher
+  updates a datetime to record that it is still alive.
+
+    >>> queue.dispatchers[dispatcher.UUID].last_ping is not None
+    True
+
+  The dispatcher needs to update its last_ping after every ``ping_interval``
+  seconds.  If it has not updated the last_ping after ``ping_death_interval``
+  then the dispatcher is considered to be dead, and active jobs in the
+  dispatcher's agents are ended (and given a chance to respond to that status
+  change, so they can put themselves back on the queue to be restarted if
+  desired).
+
+    >>> queue.dispatchers[dispatcher.UUID].ping_interval
+    datetime.timedelta(0, 30)
+    >>> queue.dispatchers[dispatcher.UUID].ping_death_interval
+    datetime.timedelta(0, 60)
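+
+  These are ordinary attributes on the persistent dispatcher agent, so a
+  deployment that wants faster failover could shorten them.  An illustrative
+  sketch, not exercised by this document:
+
+    da = queue.dispatchers[dispatcher.UUID]
+    da.ping_interval = datetime.timedelta(seconds=10)
+    da.ping_death_interval = datetime.timedelta(seconds=20)
+    transaction.commit()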
+
+- We have some log entries.  (We're using some magic log handlers inserted by
+  setup code in tests.py here.)
+  
+    >>> print event_logs # doctest: +ELLIPSIS
+    zc.async.events INFO
+      attempting to activate dispatcher ...
+
+    >>> print trace_logs # doctest: +ELLIPSIS
+    zc.async.trace DEBUG
+      poll ...
+
+So the dispatcher is running now.  It still won't do any jobs until we tell
+it the kind of jobs it should perform.  Let's add in our default agent, with
+the default configuration that it is willing to perform any job.
+
+    >>> import zc.async.agent
+    >>> agent = zc.async.agent.Agent()
+    >>> queue.dispatchers[dispatcher.UUID]['main'] = agent
+    >>> transaction.commit()
+
+The next poll will include the fact that it asked the 'main' agent for
+a job.
+
+    >>> poll = get_poll()
+    >>> import pprint
+    >>> pprint.pprint(dict(poll))
+    {'': {'main': {'active jobs': [],
+                   'error': None,
+                   'len': 0,
+                   'new jobs': [],
+                   'size': 3}}}
+
+The pool size for the db has increased again to account for the size of the
+agent.
+
+    >>> db.getPoolSize()
+    11
+
+We can actually get it to perform some jobs now.  Here's a silly simple
+one.  We use a test convenience, wait_for_result, defined in the footnotes
+[#wait_for_result]_.
+
+    >>> import operator
+    >>> job1 = queue.put(
+    ...     zc.async.job.Job(operator.mul, 14, 3))
+    >>> print job1.result
+    None
+    >>> transaction.commit()
+
+    >>> wait_for_result(job1)
+    42
+
+That's cool.  We have a poll object that has a record of this too.
+
+    >>> for poll in dispatcher.polls:
+    ...     if (poll.get('') and poll[''].get('main') and
+    ...         poll['']['main']['new jobs']):
+    ...         break
+    ... else:
+    ...     assert False, 'poll not found'
+    ...
+    >>> pprint.pprint(dict(poll)) # doctest: +ELLIPSIS
+    {'': {'main': {'active jobs': [],
+                   'error': None,
+                   'len': 0,
+                   'new jobs': [('\x00...', 'unnamed')],
+                   'size': 3}}}
+
+We also have some log entries.
+
+    >>> info = debug = None
+    >>> for r in reversed(trace_logs.records):
+    ...     if info is None and r.levelname == 'INFO':
+    ...         info = r
+    ...     elif debug is None and r.levelname == 'DEBUG':
+    ...         debug = r
+    ...     elif info is not None and debug is not None:
+    ...         break
+    ... else:
+    ...     assert False, 'could not find'
+    ...
+    >>> print info.getMessage() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    <zc.async.job.Job (oid ..., db 'unnamed')
+     ``<built-in function mul>(14, 3)``> succeeded in thread ... with result:
+    42
+
+    >>> print debug.getMessage() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    poll ...:
+      {'':
+        {'main':
+          {'active jobs': [], 'error': None,
+           'new jobs': [('\x...', 'unnamed')], 'len': 0, 'size': 3}}}
+    
+
+[#getPollInfo]_ Notice that ``new jobs`` in both the poll and the log now has
+a value in it.  We can get some information about that job from the
+dispatcher.
+
+    >>> info = dispatcher.getJobInfo(*poll['']['main']['new jobs'][0])
+    >>> pprint.pprint(info)
+    ... # doctest: +ELLIPSIS
+    {'call': "<zc.async.job.Job (oid ..., db 'unnamed') ``<built-in function mul>(14, 3)``>",
+     'completed': datetime.datetime(...),
+     'failed': False,
+     'poll id': ...,
+     'quota names': (),
+     'result': '42',
+     'started': datetime.datetime(...),
+     'thread': ...}
+    >>> info['thread'] is not None
+    True
+    >>> info['poll id'] is not None
+    True
+
+Notice that the result is a repr.  If this had been a failure, it would have
+been a (very) verbose traceback [#show_error]_.
+
+As seen in other documents in zc.async, the job can also be a method of a
+persistent object, affecting a persistent object.
+
+    >>> import BTrees.Length
+    >>> length = root['length'] = BTrees.Length.Length()
+    >>> length()
+    0
+    >>> job2 = queue.put(zc.async.job.Job(length.change, 4))
+    >>> transaction.commit()
+    >>> wait_for_result(job2)
+    >>> length()
+    4
+
+``zc.async.local`` also allows some fun tricks.  Your method can access the
+job--for instance, to access the queue and put another job in, or access
+annotations on the job, as of the last database sync for the thread's
+connection (at transaction boundaries).
+
+    >>> import zc.async
+    >>> def hand_off():
+    ...     job = zc.async.local.getJob()
+    ...     return job.queue.put(zc.async.job.Job(operator.mul, 21, 2))
+    ...
+    >>> job3 = queue.put(hand_off)
+    >>> transaction.commit()
+
+    >>> wait_for_result(job3)
+    42
+
+It can also get and set job annotations *live, in another connection*. 
+This allows you to send messages about job progress, or get live
+information about whether you should change or stop your work, for
+instance.
+
+An important caveat about this is that the annotations, whether a get or
+a set, must not be persistent objects, or contain them directly or indirectly.
+We use a new test convenience , wait_for_annotation, defined in the footnotes
+[#wait_for_annotation]_.
+
+    >>> def annotation_func():
+    ...     zc.async.local.setLiveAnnotation('hello', 'from thread!')
+    ...     reply = zc.async.local.getLiveAnnotation(
+    ...         'reply', timeout=3, poll=0.1)
+    ...     local = zc.async.local.getJob().annotations.get('reply', 'MISSING')
+    ...     return 'reply is %r.  Locally it is %s.' % (reply, local)
+    ...
+    >>> job4 = queue.put(annotation_func)
+    >>> transaction.commit()
+    
+    >>> wait_for_annotation(job4, 'hello')
+    'from thread!'
+    >>> job4.annotations['reply'] = 'HIYA'
+    >>> transaction.commit()
+    >>> wait_for_result(job4)
+    "reply is 'HIYA'.  Locally it is MISSING."
+
+We can analyze the work the dispatcher has done. The records for this generally
+only go back about ten or twelve minutes--just enough to get a feel for the
+current health of the dispatcher. Use the log if you want more long-term
+analysis.
+
+    >>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
+    {'failed': 1,
+     'longest active': None,
+     'longest failed': ('\x00...', 'unnamed'),
+     'longest successful': ('\x00...', 'unnamed'),
+     'shortest active': None,
+     'shortest failed': ('\x00...', 'unnamed'),
+     'shortest successful': ('\x00...', 'unnamed'),
+     'started': 6,
+     'statistics end': datetime.datetime(...),
+     'statistics start': datetime.datetime(...),
+     'successful': 5,
+     'unknown': 0}
+
+We can get a report on the dispatcher's status.
+
+    >>> pprint.pprint(dispatcher.getStatusInfo()) # doctest: +ELLIPSIS
+    {'poll interval': datetime.timedelta(0, 0, 500000),
+     'status': 'RUNNING',
+     'time since last poll': datetime.timedelta(...),
+     'uptime': datetime.timedelta(...),
+     'uuid': UUID('...')}
+
+When we stop the reactor, the dispatcher also deactivates.
+
+    >>> reactor.callFromThread(reactor.stop)
+    >>> thread.join(3)
+
+    >>> pprint.pprint(dispatcher.getStatusInfo()) # doctest: +ELLIPSIS
+    {'poll interval': datetime.timedelta(0, 0, 500000),
+     'status': 'STOPPED',
+     'time since last poll': None,
+     'uptime': None,
+     'uuid': UUID('...')}
+
+The db's pool size has returned to the original value.
+
+    >>> db.getPoolSize()
+    7
+
+.. ......... ..
+.. Footnotes ..
+.. ......... ..
+
+.. [#setUp]
+
+    >>> import ZODB.FileStorage
+    >>> storage = ZODB.FileStorage.FileStorage(
+    ...     'main.fs', create=True)
+    >>> from ZODB.DB import DB 
+    >>> db = DB(storage) 
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+.. [#get_poll]
+
+    >>> import time
+    >>> def get_poll(count=None):
+    ...     if count is None:
+    ...         count = len(dispatcher.polls)
+    ...     for i in range(30):
+    ...         if len(dispatcher.polls) > count:
+    ...             return dispatcher.polls.first()
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'no poll!'
+    ... 
+
+.. [#wait_for_result]
+
+    >>> import zc.async.interfaces
+    >>> def wait_for_result(job):
+    ...     for i in range(30):
+    ...         t = transaction.begin()
+    ...         if job.status == zc.async.interfaces.COMPLETED:
+    ...             return job.result
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'job never completed'
+    ...
+
+.. [#getPollInfo] The dispatcher has a ``getPollInfo`` method that lets you
+    find this poll information also.
+    
+    >>> dispatcher.getPollInfo(at=poll.key) is poll
+    True
+    >>> dispatcher.getPollInfo(at=poll.utc_timestamp) is poll
+    True
+    >>> dispatcher.getPollInfo(before=poll.key) is not poll
+    True
+    >>> dispatcher.getPollInfo(before=poll.utc_timestamp) is not poll
+    True
+    >>> dispatcher.getPollInfo(before=poll.key-16) is poll
+    True
+    >>> dispatcher.getPollInfo(
+    ...     before=poll.utc_timestamp + datetime.timedelta(seconds=0.4)
+    ...     ) is poll
+    True
+
+.. [#show_error] OK, so you want to see a verbose traceback?  OK, you asked
+    for it. We're eliding more than 90% of this, and this is a small one,
+    believe it or not. Rotate your logs!
+    
+    Notice that all of the values in the logs are reprs.
+
+    >>> bad_job = queue.put(
+    ...     zc.async.job.Job(operator.mul, 14, None))
+    >>> transaction.commit()
+
+    >>> wait_for_result(bad_job)
+    <zc.twist.Failure exceptions.TypeError>
+    
+    >>> for r in reversed(trace_logs.records):
+    ...     if r.levelname == 'ERROR':
+    ...         break
+    ... else:
+    ...     assert False, 'could not find log'
+    ...
+    >>> print r.getMessage() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    <zc.async.job.Job (oid ..., db 'unnamed')
+     ``<built-in function mul>(14, None)``> failed in thread ... with result:
+    *--- Failure #... (pickled) ---
+    .../zc/async/job.py:...: _call_with_retry(...)
+     [ Locals ]...
+     ( Globals )...
+    .../zc/async/job.py:...: <lambda>(...)
+     [ Locals ]...
+     ( Globals )...
+    exceptions.TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'
+    *--- End of Failure #... ---
+    <BLANKLINE>
+
+.. [#wait_for_annotation]
+
+    >>> def wait_for_annotation(job, name):
+    ...     for i in range(30):
+    ...         t = transaction.begin()
+    ...         if name in job.annotations:
+    ...             return job.annotations[name]
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'TIMEOUT'
+    ...

Copied: zc.async/trunk/src/zc/async/dispatcher.zcml (from rev 85211, zc.async/branches/dev/src/zc/async/dispatcher.zcml)
===================================================================
--- zc.async/trunk/src/zc/async/dispatcher.zcml	                        (rev 0)
+++ zc.async/trunk/src/zc/async/dispatcher.zcml	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configure xmlns="http://namespaces.zope.org/zope">
+    <include file="configure.zcml" />
+    <utility component=".monitor.async"
+             provides="zc.z3monitor.interfaces.IZ3MonitorPlugin" name="async" />
+    <!-- maybe could divide up queue_installer so queues collection is added
+         here? -->
+</configure>

Deleted: zc.async/trunk/src/zc/async/engine.py
===================================================================
--- zc.async/trunk/src/zc/async/engine.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/engine.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,254 +0,0 @@
-import uuid
-import Queue
-import thread
-import threading
-import datetime
-import logging
-import pytz
-import twisted.internet.reactor
-import ZODB.POSException
-import transaction
-import transaction.interfaces
-
-import zc.twist
-    
-def remove(container, partial, result=None):
-    container.remove(partial)
-    container.__parent__.completed.add(partial)
-    
-def perform(p):
-    p()
-    p.addCallback(zc.async.partial.Partial(remove, p.__parent__, p))
-
-engines = {}
-
-class Engine(object):
-    # this intentionally does not have an interface.  It would be nicer if this
-    # could be a Twisted service, part of the main Zope service, but that does
-    # not appear easy to arrange at the moment.  Therefore we have a subscriber
-    # in subscribers.py that does custom set-up, using raw reactor code.
-    # Eventually I'd like to move this to a service interface, and tie it to
-    # the Zope service in the subscriber.
-
-    _needed = 0
-    alive = True
-
-    def __init__(self, UUID, factory):
-        self.workerUUID = UUID
-        self.factory = factory
-        self.thread_queue = Queue.Queue(0)
-        self._threads = []
-        self.UUID = uuid.uuid4() # this is supposed to distinguish this engine
-        # instance from any others potentially wanting to work on the worker.
-        assert UUID not in engines
-        engines[UUID] = self
-
-    def perform_thread(self):
-        try:
-            job = self.thread_queue.get()
-            while job is not None:
-                db, identifier = job
-                conn = db.open()
-                removal = None
-                try:
-                    transaction.begin()
-                    p = conn.get(identifier)
-                    p.thread = thread.get_ident()
-                    transaction.commit()
-                    removal = zc.twist.Partial(remove, p.__parent__, p)
-                    try:
-                        p() # this does the committing and retrying, largely
-                    except ZODB.POSException.TransactionError:
-                        transaction.abort()
-                        while 1:
-                            try:
-                                p.fail()
-                                transaction.commit()
-                            except ZODB.POSException.TransactionError:
-                                transaction.abort() # retry forever (!)
-                            else:
-                                break
-                finally:
-                    conn.close()
-                    if removal is not None:
-                        twisted.internet.reactor.callFromThread(removal)
-                job = self.thread_queue.get()
-        finally:
-            # this may cause some bouncing, but we don't ever want to end
-            # up with fewer than needed.
-            twisted.internet.reactor.callFromThread(self.set_threads)
-    
-    def set_threads(self, needed=None):
-        # this should only be called from the main thread (otherwise it needs
-        # locks)
-        if needed is None:
-            needed = self._needed
-        else:
-            self._needed = needed
-        res = []
-        ct = 0
-        for t in self._threads:
-            if t.isAlive():
-                res.append(t)
-                ct += 1
-        self._threads[:] = res
-        if ct < needed:
-            for i in range(max(needed - ct, 0)):
-                t = threading.Thread(target=self.perform_thread)
-                self._threads.append(t)
-                t.start()
-        elif ct > needed:
-            # this may cause some bouncing, but hopefully nothing too bad.
-            for i in range(ct - needed):
-                self.thread_queue.put(None)
-    
-    def poll(self, datamanager):
-        if not self.alive:
-            return
-        poll_seconds = 0.25
-        call = zc.twist.Partial(self.poll, datamanager)
-        try:
-            tm = transaction.interfaces.ITransactionManager(datamanager)
-            tm.begin()
-            now = datetime.datetime.now(pytz.UTC)
-            worker = datamanager.workers.get(self.workerUUID)
-            if worker is not None:
-                if worker.engineUUID is None:
-                    worker.engineUUID = self.UUID
-                    try:
-                        tm.commit()
-                    except ZODB.POSException.TransactionError:
-                        # uh-oh.  Somebody else may be adding a worker for the
-                        # same UUID.  we'll just return for now, and figure that
-                        # the next go-round will report the problem.
-                        return # will call finally clause, including abort
-                elif worker.engineUUID != self.UUID:
-                    # uh-oh.  Maybe another engine is in on the action?
-                    time_of_death = (worker.last_ping + worker.ping_interval
-                                     + worker.ping_death_interval)
-                    if time_of_death < now:
-                        # hm.  Looks like it's dead.
-                        zc.async.datamanager.cleanDeadWorker(worker)
-                        worker.engineUUID = self.UUID
-                        try:
-                            tm.commit()
-                        except ZODB.POSException.TransactionError:
-                            # uh-oh.  Somebody else may be adding a worker for
-                            # the same UUID.  we'll just return for now, and
-                            # figure that the next go-round will report the
-                            # problem.
-                            return # will call finally clause, including abort
-                    else:
-                        # this is some other engine's UUID,
-                        # and it isn't dead (yet?).  Houston, we have a problem.
-                        interval = time_of_death - now
-                        logging.warning(
-                            'Another engine instance, %s, has claimed worker '
-                            '%s.  This engine instance, %s, is '
-                            "deferring.  The other engine will be "
-                            "regarded dead and scheduled for removal after "
-                            '%d days, %d seconds, and %d microseconds',
-                            worker.engineUUID, worker.UUID, self.UUID,
-                            interval.days, interval.seconds,
-                            interval.microseconds)
-                        return # which will call the finally clause
-            else:
-                worker = self.factory(self.workerUUID)
-                datamanager.workers.add(worker)
-                worker.engineUUID = self.UUID
-                try:
-                    tm.commit()
-                except ZODB.POSException.TransactionError:
-                    # uh-oh.  Somebody else may be adding a worker for the
-                    # same UUID.  we'll just return for now, and figure that
-                    # the next go-round will report the problem.
-                    return # will call finally clause, including abort
-            poll_seconds = worker.poll_seconds
-            datamanager.checkSibling(worker.UUID)
-            try:
-                tm.commit()
-            except ZODB.POSException.TransactionError:
-                tm.abort()
-                # we'll retry next poll.  Let's keep going.
-            if (worker.completed.last_rotation +
-                worker.completed.rotation_interval) <= now:
-                worker.completed.rotate()
-                try:
-                    tm.commit()
-                except ZODB.POSException.TransactionError:
-                    tm.abort()
-                    # we'll retry next poll.  Keep going.
-            if worker.last_ping + worker.ping_interval <= now:
-                worker.last_ping = now
-                try:
-                    tm.commit()
-                except ZODB.POSException.TransactionError:
-                    # uh-oh: are there two engines working with the same worker?
-                    logging.error(
-                        "Transaction error for worker %s.  This should not "
-                        "happen.", self.workerUUID)
-                    return
-            def thread_size():
-                if len(datamanager.workers) == 1:
-                    return 1
-                else:
-                    return worker.thread_size
-            self.set_threads(thread_size())
-            while len(worker.thread) < thread_size():
-                p = datamanager.thread.pullNext(uuid)
-                if p is not None:
-                    worker.thread.add(p)
-                    try:
-                        tm.commit()
-                    except ZODB.POSException.TransactionError:
-                        tm.abort()
-                    else:
-                        self.thread_queue.put((p._p_jar.db(), p._p_oid))
-                else:
-                    break
-            self.set_threads(thread_size())
-            while len(worker.reactor) < worker.reactor.size:
-                p = datamanager.reactor.pullNext(uuid)
-                if p is not None:
-                    worker.reactor.add(p)
-                    try:
-                        tm.commit()
-                    except ZODB.POSException.TransactionError:
-                        tm.abort()
-                    else:
-                        twisted.internet.reactor.callLater(
-                            0, zc.twist.Partial(perform, p))
-                else:
-                    break
-            now = datetime.datetime.now(pytz.UTC)
-            if worker.last_ping + worker.ping_interval <= now:
-                worker.last_ping = now
-                try:
-                    tm.commit()
-                except ZODB.POSException.TransactionError:
-                    # uh-oh: are there two engines working with the same worker?
-                    logging.error(
-                        "Transaction error for worker %s.  This should not "
-                        "happen.", self.workerUUID)
-                    return
-        finally:
-            tm.abort()
-            if self.alive:
-                twisted.internet.reactor.callLater(poll_seconds, call)
-
-    def tearDown(self, datamanager):
-        self.alive = False
-        self.set_threads(0)
-        try:
-            tm = transaction.interfaces.ITransactionManager(datamanager)
-            tm.begin()
-            worker = datamanager.workers.get(self.workerUUID)
-            if worker is not None:
-                worker.engineUUID = None
-                datamanager.thread.put(
-                    zc.async.partial.Partial(
-                        zc.async.datamanager.cleanDeadWorker, worker))
-                tm.commit()
-        finally:
-            tm.abort()
-        

Modified: zc.async/trunk/src/zc/async/instanceuuid.py
===================================================================
--- zc.async/trunk/src/zc/async/instanceuuid.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/instanceuuid.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -10,20 +10,31 @@
 ------------------------------------------------------------------------
 The value above (and this file) is created and used by the zc.async
 package. It is intended to uniquely identify this software instance when
-it is used to start a zc.async worker process.  This allows multiple
-workers to connect to a single database to do work.  The software
-expects an instance home to only generate a single process.
+it is used to start a zc.async dispatcher.  This allows multiple
+dispatchers, each in its own software instance, to connect to a single
+database to do work.
 
-To get a new identifier for this software instance, delete this file and
-restart Zope (or more precisely, delete this file, restart Python, and
-import zc.async.instanceuuid).  This file will be recreated with a new value.
+In order to decide where to look for this file (or to create it, if
+necessary), the module looks in ``os.environ['ZC_ASYNC_UUID']`` for a file
+name.  If you are using Zope 3, you can set this in a zdaemon environment
+section of your zdaemon.conf.
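+
+For example, a zdaemon.conf might include a section like the following
+(an illustrative sketch; adjust the path for your instance):
+
+  <environment>
+    ZC_ASYNC_UUID /path/to/instance/etc/uuid.txt
+  </environment>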
+
+If the ``ZC_ASYNC_UUID`` is not found in the environment, it will use
+``os.path.join(os.getcwd(), 'uuid.txt')`` as the file name.
+
+To get a new identifier for this software instance, delete this file,
+restart Python, and import zc.async.instanceuuid.  This file will be
+recreated with a new value.
 """
 
 zope.interface.classImplements(uuid.UUID, zc.async.interfaces.IUUID)
 
+key = 'ZC_ASYNC_UUID'
+
 def getUUID():
-    file_name = os.path.join(
-        os.environ.get("INSTANCE_HOME"), 'etc', 'uuid.txt')
+    file_name = os.environ.get(key)
+    if not file_name:
+        file_name = os.path.join(os.getcwd(), 'uuid.txt')
     if os.path.exists(file_name):
         f = open(file_name, 'r')
         UUID = uuid.UUID(f.readline().strip())

Modified: zc.async/trunk/src/zc/async/interfaces.py
===================================================================
--- zc.async/trunk/src/zc/async/interfaces.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/interfaces.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -5,85 +5,171 @@
 import zc.queue.interfaces
 from zc.async.i18n import _
 
-PENDING = _('pending-state', 'Pending')
-ACTIVE = _('active-state', 'Active')
-CALLBACKS = _('callback-state', 'Performing Callbacks')
-COMPLETED = _('completed-state', 'Completed')
+# this is our only direct dependency on anything in zope.app, which is
+# only used by our convenience subscribers.  Since we don't really need this,
+# or zope.app, we make this import optional and provide some replacements if
+# necessary.
+try:
+    from zope.app.appsetup.interfaces import (IDatabaseOpenedEvent,
+                                              DatabaseOpened)
+except ImportError:
+    class IDatabaseOpenedEvent(zope.interface.Interface):
+        """The main database has been opened."""
+    
+        database = zope.interface.Attribute("The main database.")
+    
+    class DatabaseOpened(object):
+        zope.interface.implements(IDatabaseOpenedEvent)
+    
+        def __init__(self, database):
+            self.database = database
 
+# TODO: these interfaces are not particularly complete.  The other
+# documentation is more accurate at the moment.
+
+KEY = 'zc.async'
+
+NEW = _('new-status', 'New')
+PENDING = _('pending-status', 'Pending')
+ASSIGNED = _('assigned-status', 'Assigned')
+ACTIVE = _('active-status', 'Active')
+CALLBACKS = _('callback-status', 'Performing Callbacks')
+COMPLETED = _('completed-status', 'Completed')
+
+class IReactor(zope.interface.Interface):
+    """This describes what the dispatcher expects of the reactor.
+    
+    The reactor does not need to actually provide this interface."""
+    
+    def callFromThread(callable, *args, **kw):
+        """have callable run in reactor's thread, by reactor, ASAP.
+        
+        Intended to be called from a thread other than the reactor's main
+        loop.
+        """
+    
+    def callInThread(callable, *args, **kw):
+        """have callable run in a separate thread, ASAP.
+        
+        Must be called in same thread as reactor's main loop.
+        """
+    
+    def callLater(seconds, callable, *args, **kw):
+        """have callable run in reactor at least <seconds> from now
+        
+        Must be called in same thread as reactor's main loop.
+        """
+
+    def addSystemEventTrigger(phase, event, callable, *args, **kw):
+        """Install a callable to be run in phase of event.
+        
+        must support phase 'before', and event 'shutdown'.
+        """
+
+    def callWhenRunning(_callable, *args, **kw):
+        """run callable now if running, or when started.
+        """
+
+
+class IObjectEvent(zope.interface.Interface):
+    """Event happened to object"""
+    
+    object = zope.interface.Attribute('the object')
+
+class AbstractObjectEvent(object):
+    def __init__(self, object):
+        self.object = object
+
+class IDispatcherRegistered(IObjectEvent):
+    """Dispatcher was registered"""
+
+class DispatcherRegistered(AbstractObjectEvent):
+    zope.interface.implements(IDispatcherRegistered)
+
+class IDispatcherUnregistered(IObjectEvent):
+    """Dispatcher was unregistered"""
+
+class DispatcherUnregistered(AbstractObjectEvent):
+    zope.interface.implements(IDispatcherUnregistered)
+
+class IDispatcherActivated(IObjectEvent):
+    """Dispatcher was activated"""
+
+class DispatcherActivated(AbstractObjectEvent):
+    zope.interface.implements(IDispatcherActivated)
+
+class IDispatcherDeactivated(IObjectEvent):
+    """Dispatcher was deactivated"""
+
+class DispatcherDeactivated(AbstractObjectEvent):
+    zope.interface.implements(IDispatcherDeactivated)
+
 class AbortedError(Exception):
     """An explicit abort, as generated by the default behavior of
-    IPartial.fail"""
+    IJob.fail"""
 
-class BadStateError(Exception):
-    """The partial is not in the state it should be for the call being made.
+
+class BadStatusError(Exception):
+    """The job is not in the status it should be for the call being made.
     This is almost certainly a programmer error."""
 
-class IPartialFactory(zope.interface.Interface):
 
-    def __call__(self, call, *args, **kwargs):
-        """return an IPartial with the given call, args, and kwargs"""
+class IJob(zope.interface.Interface):
 
-    def bind(self, call, *args, **kwargs):
-        """returns IPartial with the IPartial inserted as first value in args.
-        """
+    parent = zope.interface.Attribute(
+        """The current canonical location of the job""")
 
-class IPartial(zope.interface.Interface):
-
-    __parent__ = zope.interface.Attribute(
-        """The current canonical location of the partial""")
-
     callable = zope.interface.Attribute(
-        """The callable object that should be called with *IPartial.args and
-        **IPartial.kwargs when the IPartial is called.  Mutable.""")
+        """The callable object that should be called with *IJob.args and
+        **IJob.kwargs when the IJob is called.  Mutable.""")
 
     args = zope.interface.Attribute(
         """a peristent list of the args that should be applied to self.call.
         May include persistent objects (though note that, if passing a method
-        is desired, it will typicall need to be wrapped in an IPartial).""")
+        is desired, it will typically need to be wrapped in an IJob).""")
 
     kwargs = zope.interface.Attribute(
         """a persistent mapping of the kwargs that should be applied to
         self.call.  May include persistent objects (though note that, if
         passing a method is desired, it will typicall need to be wrapped
-        in an IPartial).""")
+        in an IJob).""")
 
-    state = zope.interface.Attribute(
-        """One of constants defined in zc.async.interfaces: PENDING,
-        ACTIVE, CALLBACKS, COMPLETED.  PENDING means not yet called. 
-        ACTIVE means in the process of being called.  CALLBACKS means in
-        the process of calling callbacks.  COMPLETED means called.""")
+    status = zope.interface.Attribute(
+        """One of constants defined in zc.async.interfaces:
+        NEW, PENDING, ASSIGNED, ACTIVE, CALLBACKS, COMPLETED.
 
+        NEW means not added to a queue and not yet called.
+        PENDING means added to a queue but not an agent, and not yet called.
+        ASSIGNED means added to an agent and not yet called.
+        ACTIVE means in the process of being called.
+        CALLBACKS means in the process of calling callbacks.
+        COMPLETED means called.""")
+
     result = zope.interface.Attribute(
         """The result of the call.  When state equals PENDING or ACTIVE, will
         be None.  When COMPLETED, will be a twisted.python.failure.Failure
         describing the call failure or the successful result.""")
 
     callbacks = zope.interface.Attribute(
-        """A mutable persistent list of the callback partials added by
+        """A mutable persistent list of the callback jobs added by
         addCallbacks.""")
 
-    unhandled_error = zope.interface.Attribute(
-        """A boolean: whether this partial has an unhandled error.
-        An unhandled error is defined as a Failure result on any callback
-        leaf node, or a Failure on this partial if this has no callbacks
-        or if it has one or more incomplete callback.""")
-
     annotations = zope.interface.Attribute(
         """An OOBTree that is available for metadata use.""")
 
     def addCallbacks(success=None, failure=None):
-        """if success or failure is not None, adds a callback partial to
-        self.callbacks and returns the partial.  Otherwise returns self.
-        success and failure must be None or adaptable to IPartial.
+        """if success or failure is not None, adds a callback job to
+        self.callbacks and returns the job.  Otherwise returns self.
+        success and failure must be None or adaptable to IJob.
         addCallbacks may be called multiple times.  Each will be called
-        with the result of this partial.  If callback is already in COMPLETED
+        with the result of this job.  If callback is already in COMPLETED
         state then the callback will be performed immediately."""
 
     def addCallback(callback):
         """callback will receive result (independent of whether it is a
-        success or a failure).  callback must be adaptable to IPartial.
+        success or a failure).  callback must be adaptable to IJob.
         addCallback may be called multiple times.  Each will be called
-        with the result of this partial.  If callback is already in
+        with the result of this job.  If callback is already in
         COMPLETED state then the callback will be performed immediately."""
 
     def __call__(*args, **kwargs):
@@ -92,86 +178,106 @@
         for the call."""
 
     def fail(e=AbortedError):
-        """Fail this partial, with option error e.  May only be called when
-        partial is in PENDING or ACTIVE states, or else raises BadStateError.
+        """Fail this job, with option error e.  May only be called when
+        job is in PENDING or ACTIVE states, or else raises BadStatusError.
         If e is not provided,"""
 
     def resumeCallbacks():
-        """Make all callbacks remaining for this partial.  Any callbacks
+        """Make all callbacks remaining for this job.  Any callbacks
         that are in PENDING state should be called normally; any callbacks
         in ACTIVE state should be `fail`ed; any callbacks in CALLBACKS state
         should `resumeCallback`; and any callbacks in COMPLETED state should
-        be untouched.  May only be called when partial is in CALLBACKS state.
+        be untouched.  May only be called when job is in CALLBACKS state.
         State will be COMPLETED after this call."""
 
-class IDataManagerPartial(IPartial):
-    """An async partial with all the necessary knobs to by put in a
-    datamanager."""
-
-    workerUUID = zope.interface.Attribute(
-        """The UUID of the IWorker who is, or was, responsible for this
-        partial.  None initially.  Should be assigned by
-        IWorker.[reactor|thread].put.""")
-
     assignerUUID = zope.interface.Attribute(
         """The UUID of the software instance that was in charge when the
-        IPartial was put in an IPartialQueue.  Should be assigned by
-        IPartialQueue.put.""")
+        IJob was put in an IJobQueue.  Should be assigned by
+        IJobQueue.put.""")
 
-    selectedUUIDs = zope.interface.Attribute(
-        """a set of selected worker UUIDs.  If it is empty, it is
-        interpreted as the set of all available workerUUIDs.  Only
-        workers with UUIDs in the set may perform it.
+#     selectedUUIDs = zope.interface.Attribute(
+#         """a set of selected worker UUIDs.  If it is empty, it is
+#         interpreted as the set of all available workerUUIDs.  Only
+#         workers with UUIDs in the set may perform it.
+# 
+#         If a worker would have selected this job for a run, but the
+#         difference of selected_workerUUIDs and excluded_workerUUIDs
+#         stopped it, it is responsible for verifying that the effective
+#         set of workerUUIDs intersects with the available workers; if the
+#         intersection contains no possible workers, the worker should
+#         call job.fail().""")
 
-        If a worker would have selected this partial for a run, but the
-        difference of selected_workerUUIDs and excluded_workerUUIDs
-        stopped it, it is responsible for verifying that the effective
-        set of workerUUIDs intersects with the available workers; if the
-        intersection contains no possible workers, the worker should
-        call partial.fail().""")
-
-    excludedUUIDs = zope.interface.Attribute(
-        """a set of excluded worker UUIDs.  Workers with UUIDs in this
-        set may not perform the partial.
-
-        If a worker would have selected this partial for a run, but the
-        difference of selected_workerUUIDs and excluded_workerUUIDs
-        stopped it, it is responsible for verifying that the effective
-        set of workerUUIDs intersects with the available workers; if the
-        intersection contains no possible workers, the worker should
-        call partial.fail().""")
-
     begin_after = zope.interface.Attribute(
         """A datetime.datetime in UTC of the first time when the
-        partial may run.  Cannot be set after partial gets a data_manager.
+        job may run.  Cannot be set after job gets a data_manager.
         """)
 
     begin_by = zope.interface.Attribute(
         """A datetime.timedelta of the duration after the begin_after
-        value after which the partial will fail, if it has not already
-        begun.  Cannot be set after partial has begun.""")
+        value after which the job will fail, if it has not already
+        begun.  Cannot be set after job has begun.""")
 
-    thread = zope.interface.Attribute(
-        """None or thread.get_ident() of the worker who performs it.  If a
-        reactor partial, must be None.""")
 
-class IPartialQueue(zc.queue.interfaces.IQueue):
+class IAgent(zope.interface.common.sequence.IFiniteSequence):
+    """Responsible for picking jobs and keeping track of them.
+    
+    An agent is a persistent object in a queue that is associated with a
+    dispatcher and is responsible for picking jobs and keeping track of
+    them. Zero or more agents within a queue can be associated with a
+    dispatcher. 
+    
+    Each agent for a given dispatcher is identified uniquely with a
+    name.  A fully (universally) unique identifier for the agent can be
+    obtained by combining the key of the agent's queue in the main queue
+    mapping at the ZODB root; the UUID of the agent's dispatcher; and
+    the agent's name.
+    """
+    
+    size = zope.interface.Attribute(
+        """The maximum number of jobs this agent should have active at a time.
+        """)
 
-    __parent__ = zope.interface.Attribute(
+    name = zope.interface.Attribute(
+        """The name for this agent.  Unique within its dispatcher's jobs for
+        its queue.  Can be used to obtain agent with 
+        queue.dispatchers[*dispatcher UUID*][*name*].""")
+
+    completed = zope.interface.Attribute(
+        """an ICompleted of recent completed jobs.""")
+
+    parent = zope.interface.Attribute(
+        """a link to parent: an IDispatcherAgents container.""")
+
+    def get():
+        """get a new item, obtained from queue; or None if there are no
+        items in the queue that this agent wants to take, or the agent is
+        full.  If an item is returned, it has also been added to the agent.
+        """
+
+    def remove(item):
+        """remove item, or raise ValueError if item is not in queue"""
+
+    def __delitem__(index):
+        """delete item at index"""
+
+    def index(item):
+        """return index, or raise ValueError if item is not in queue"""
+
+
+class IQueue(zc.queue.interfaces.IQueue):
+
+    parent = zope.interface.Attribute(
         """the IDataManager of which this is a part.""")
 
-    thread = zope.interface.Attribute(
-        """boolean of whether this is a thread or reactor queue""")
-
     def put(item, begin_after=None, begin_by=None):
-        """Put an IPartial adapted from item into the queue.  Returns IPartial.
+        """Put an IJob adapted from item into the queue.  Returns IJob.
 
-        Rememeber that IPartials are not guaranteed to be run in order
+        Remember that IJobs are not guaranteed to be run in order
         added to a queue.  If you need sequencing, use
-        IPartial.addCallbacks.
+        IJob.addCallbacks.
         
-        item must be an IPartial, or be adaptable to that interface.
-        begin_after must be None (to leave the partial's current value) or a 
+        item must be an IJob, or be adaptable to that interface.
+        begin_after must be None (to leave the job's current value) or a 
         datetime.datetime.  begin_by must be None (to leave it alone) or a
         datetime.timedelta of the duration after the begin_after.
 
@@ -184,67 +290,49 @@
         will be converted to UTC, and errors because of this (such as
         pytz ambiguity errors) will be raised.
 
-        When an IPartial is put in the queue, the queue puts the
-        begin_after time and begin_by duration on the partial,
-        and the UUID of the Zope instance that put the partial in the
+        When an IJob is put in the queue, the queue puts the
+        begin_after time and begin_by duration on the job,
+        and the UUID of the Zope instance that put the job in the
         queue on the `assignerUUID`.
         """
 
-    def iterDue():
-        """return an iterable of all partials whose begin_after value is
-        less than or equal to now.  Any expired partials (begin_after +
-        begin_by > datetime.datetime.now(pytz.UTC)) are also included.
-        """
-
-    def pullNext(UUID):
-        """returns first due job that is available for the given UUID,
+    def claim(filter=None, default=None):
+        """returns first due job that is available for the given filter,
         removing it from the queue as appropriate; or None, if none are
         available. Responsible for including jobs to fail expired
-        partials, and jobs to decomission dead workers for the next
-        highest worker (sorted by UUID) if its (last_ping +
-        ping_interval + ping_death_interval) < now.  If this is the
-        highest worker UUID, cycles around to lowest."""
+        jobs."""
 
-class IWorkers(zope.interface.common.mapping.IEnumerableMapping):
+class IDispatcherAgents(zope.interface.common.mapping.IMapping):
+    """holds agents.  contained agents get a ``name`` and ``parent``
+    associated with this mapping."""
 
-    __parent__ = zope.interface.Attribute(
-        """the IDataManager of which this is a part.""")
+class IDispatchers(zope.interface.common.mapping.IEnumerableMapping):
 
-    def add(value):
-        """add an IWorker with key of value.UUID.  If value.UUID is None,
-        raise ValueError.  Set value.__parent__ to the IWorkers."""
+    def register(UUID):
+        "register UUID"
 
-    def remove(UUID):
-        """remove the registered IWorker with the give UUID.  Raise KeyError
-        if such does not exist."""
+    def unregister(UUID):
+        "unregister UUID"
 
-class IDataManager(zope.interface.Interface):
-    """Note that partials added to queues are not guaranteed to run in
-    the order added.  For sequencing, use IPartial.addCallbacks."""
+    def ping(UUID):
+        """responsible for setting ping time if necessary for this
+        dispatcher agent, and for decommissioning dead dispatchers for
+        the next highest dispatcher (sorted by UUID) if its (last_ping +
+        ping_interval + ping_death_interval) < now.  If this is the
+        highest dispatcher UUID, cycles around to lowest."""
 
-    thread = zope.interface.Attribute(
-        """An IPartialQueue of IPartials that should be run in a thread.""")
+class IQuota(zope.interface.common.mapping.IEnumerableMapping):
+    def clean():
+        "clean out inactive (finished) jobs"
+    filled = zope.interface.Attribute(
+        "boolean: whether the quota has reached its capacity")
+    def add(item):
+        "add a job"
+    name = zope.interface.Attribute(
+        "the name of this quota; jobs refer to it in their quota_names")
+    parent = zope.interface.Attribute(
+        "a link to parent: the queue's quotas mapping")
 
-    reactor = zope.interface.Attribute(
-        """An IPartialQueue of IPartials that should be run in the main
-        loop (e.g., Twisted's main reactor loop).""")
-
-    workers = zope.interface.Attribute(
-        """An IWorkers of registered IWorker objects for this data manager;
-        these objects represent processes that are responsible for claiming
-        and performing the IPartials in the data manager.""")
-
-    def checkSibling(uuid):
-        """check the next sibling of uuid to see if it is dead, according
-        to its last_poll, and remove the engineUUID and schedule removal of its
-        partials if it is dead."""
-
-class IDataManagerAvailableEvent(zope.component.interfaces.IObjectEvent):
-    """object is data manager"""
-
-class DataManagerAvailable(zope.component.interfaces.ObjectEvent):
-    zope.interface.implements(IDataManagerAvailableEvent)
-
 class FullError(Exception):
     """Container is full.
     """
@@ -257,7 +345,7 @@
     def add(item):
         """same contract as IQueue.put, except if queue's len >= size, put will
         raise FullError, and all objects get __parent__ set to the queue;
-        and it will only store partials."""
+        and it will only store jobs."""
 
     __parent__ = zope.interface.Attribute(
         """a link to parent: an IWorker""")
@@ -273,86 +361,33 @@
 
 class ICompletedCollection(zope.interface.Interface):
     def __iter__():
-        """Iterate over partials in collection, from most recent `begin_after`
+        """Iterate over jobs in collection, from most recent `begin_after`
         to oldest"""
 
     def iter(start=None, stop=None):
-        """Iterate over partials in collection, starting and stopping with
+        """Iterate over jobs in collection, starting and stopping with
         given timezone-aware datetime values reasonably efficiently."""
 
     def __len__():
-        """Return number of partials in collection"""
+        """Return number of jobs in collection"""
 
-    def add(partial):
-        """Add partial to collection and set __parent__ to the collection."""
+    def add(job):
+        """Add job to collection and set __parent__ to the collection."""
 
     __parent__ = zope.interface.Attribute(
-        """an IWorker""")
+        """an IAgent""")
 
-    rotation_interval = zope.interface.Attribute(
-        """A datetime.timedelta of how often the buckets in the collection
-        should be rotated, to clean them out.""")
-
-    last_rotation = zope.interface.Attribute(
-        """A datetime.datetime in pytz.UTC of the last time a rotation was
-        performed (should initialize to the creation time).""")
-
     def first(start=None):
-        """Return the first (most recent) partial in the collection, starting
+        """Return the first (most recent) job in the collection, starting
         with optional timezone-aware datetime."""
 
     def last(stop=None):
-        """Return the last (oldest) partial in the collection, stopping
+        """Return the last (oldest) job in the collection, stopping
         with optional timezone-aware datetime."""
 
     def __nonzero__():
-        "whether collection contains any partials"
+        "whether collection contains any jobs"
 
-    def rotate():
-        """rotate buckets, eliminating the ones added longest ago.  Note that
-        this may be different than the ordering by begin_after."""
-
-class IWorker(zope.interface.Interface):
-
-    reactor = zope.interface.Attribute(
-        """An ISizedQueue of reactor partials currently being worked on by this
-        worker.""")
-
-    thread = zope.interface.Attribute(
-        """An ISizedQueue of thread partials currently being worked on by this
-        worker.""")
-
-    UUID = zope.interface.Attribute(
-        """The uuid.UUID that identifies this worker (where one instance ==
-        one process == one worker == one UUID).""")
-
-    engineUUID = zope.interface.Attribute(
-        """The uuid.UUID of the engine that is running this worker, or None.""")
-
-    last_ping = zope.interface.Attribute(
-        """the datetime.datetime in the pytz.UTC timezone of the last ping.
-        This date should be updated anytime a worker accepts a job in a
-        reactor or thread queue; and whenever, during a poll,
-        (last_ping + ping_interval) <= now.""")
-
-    poll_seconds = zope.interface.Attribute(
-        """The number of seconds between the end of one worker poll and the
-        start of the next.""")
-
-    ping_interval = zope.interface.Attribute(
-        """The approximate maximum datetime.timedelta between pings before
-        a new last_ping should be recorded.""")
-
-    ping_death_interval = zope.interface.Attribute(
-        """the datetime.timedelta after the last_ping + ping_interval that
-        signifies the workers death.  That is, if (last_ping + ping_interval +
-        ping_death_interval) < now, the worker should be regarded as dead.
-        """)
-
-    completed = zope.interface.Attribute(
-        """The most recent partials completed by this worker, in the order
-        from most recent `begin_after` to oldest.  ICompletedCollection.""")
-
 class IUUID(zope.interface.Interface):
     """A marker interface for the API of Ka-Ping Yee's uuid.UUID class.
     See http://zesty.ca/python/uuid.html """

Copied: zc.async/trunk/src/zc/async/job.py (from rev 85211, zc.async/branches/dev/src/zc/async/job.py)
===================================================================
--- zc.async/trunk/src/zc/async/job.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/job.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,374 @@
+import types
+import datetime
+
+import BTrees.OOBTree
+import ZODB.POSException
+import ZODB.utils
+import transaction.interfaces
+import persistent
+import persistent.list
+import persistent.mapping
+import twisted.internet.defer
+import twisted.python.failure
+import zope.interface
+import zc.queue
+import zc.twist
+import rwproperty
+import pytz
+
+import zc.async.interfaces
+import zc.async.utils
+
+def _repr(obj):
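+    # render persistent objects as ``module.Class (oid <n>, db <name>)``,
+    # module-level functions as ``module.name``, and everything else with
+    # the built-in repr; used by Job.__repr__ below.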
+    if isinstance(obj, persistent.Persistent):
+        dbname = "?"
+        if obj._p_jar is not None:
+            dbname = getattr(obj._p_jar.db(), 'database_name', "?")
+            if dbname != '?':
+                dbname = repr(dbname)
+        if obj._p_oid is not None:
+            oid = ZODB.utils.u64(obj._p_oid)
+        else:
+            oid = '?'
+        return '%s.%s (oid %s, db %s)' % (
+            obj.__class__.__module__,
+            obj.__class__.__name__,
+            oid,
+            dbname)
+    elif isinstance(obj, types.FunctionType):
+        return '%s.%s' % (obj.__module__, obj.__name__)
+    else:
+        return repr(obj)
+
+def success_or_failure(success, failure, res):
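+    # dispatch a result to the ``success`` or ``failure`` callable,
+    # depending on whether it is a twisted Failure; if the matching
+    # callable is None, pass the result through unchanged.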
+    callable = None
+    if isinstance(res, twisted.python.failure.Failure):
+        if failure is not None:
+            callable = failure
+    elif success is not None:
+        callable = success
+    if callable is None:
+        return res
+    return callable(res)
+
+def completeStartedJobArguments(job, result):
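+    # a safety callback used by ``addCallbacks`` below: when the wrapping
+    # job ends in a Failure, finish any argument jobs that were started
+    # but not completed (fail ACTIVE ones; resume CALLBACKS ones).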
+    if isinstance(result, twisted.python.failure.Failure):
+        for collection in (job.args, job.kwargs.values()):
+            for a in collection:
+                if zc.async.interfaces.IJob.providedBy(a):
+                    status = a.status
+                    if status == zc.async.interfaces.ACTIVE:
+                        a.fail()
+                    elif status == zc.async.interfaces.CALLBACKS:
+                        a.resumeCallbacks()
+
+class Job(zc.async.utils.Base):
+
+    zope.interface.implements(zc.async.interfaces.IJob)
+
+    _callable_root = _callable_name = _result = None
+    _status = zc.async.interfaces.NEW
+    _begin_after = _begin_by = _active_start = _active_end = None
+    key = None
+    
+    assignerUUID = None
+    _quota_names = ()
+
+    def __init__(self, *args, **kwargs):
+        self.args = persistent.list.PersistentList(args) # TODO: blist
+        self.callable = self.args.pop(0)
+        self.kwargs = persistent.mapping.PersistentMapping(kwargs)
+        self.callbacks = zc.queue.PersistentQueue()
+        self.annotations = BTrees.OOBTree.OOBTree()
+
+    @property
+    def active_start(self):
+        return self._active_start
+
+    @property
+    def active_end(self):
+        return self._active_end
+
+    @property
+    def initial_callbacks_end(self):
+        return self.key and zc.async.utils.long_to_dt(self.key).replace(
+            tzinfo=pytz.UTC)
+
+    @property
+    def quota_names(self):
+        return self._quota_names
+    @rwproperty.setproperty
+    def quota_names(self, value):
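+        # quota names may only be set while the job is NEW or PENDING;
+        # once PENDING, each name must already exist in the queue's
+        # ``quotas`` mapping.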
+        if isinstance(value, basestring):
+            raise TypeError('provide an iterable of names')
+        status = self.status
+        if status != zc.async.interfaces.NEW:
+            if status == zc.async.interfaces.PENDING:
+                quotas = self.queue.quotas
+                for name in value:
+                    if name not in quotas:
+                        raise ValueError('unknown quota name', name)
+            else:
+                raise zc.async.interfaces.BadStatusError(
+                    'can only set quota_names when a job has NEW or PENDING '
+                    'status')
+        self._quota_names = tuple(value)
+
+    @property
+    def begin_after(self):
+        return self._begin_after
+    @rwproperty.setproperty
+    def begin_after(self, value):
+        if self.status != zc.async.interfaces.NEW:
+            raise zc.async.interfaces.BadStatusError(
+                'can only set begin_after when a job has NEW status')
+        if value is not None:
+            if value.tzinfo is None:
+                raise ValueError('cannot use timezone-naive values')
+            else:
+                value = value.astimezone(pytz.UTC)
+        self._begin_after = value
+
+    @property
+    def begin_by(self):
+        return self._begin_by
+    @rwproperty.setproperty
+    def begin_by(self, value):
+        if self.status not in (zc.async.interfaces.PENDING,
+                               zc.async.interfaces.NEW):
+            raise zc.async.interfaces.BadStatusError(
+                'can only set begin_by when a job has NEW or PENDING status')
+        if value is not None:
+            if value < datetime.timedelta():
+                raise ValueError('negative values are not allowed')
+        self._begin_by = value
+
+    @property
+    def queue(self):
+        ob = self.parent
+        while (ob is not None and
+               (zc.async.interfaces.IJob.providedBy(ob) or
+                zc.async.interfaces.IAgent.providedBy(ob) or
+                zc.async.interfaces.IDispatcherAgents.providedBy(ob))):
+            ob = ob.parent
+        if not zc.async.interfaces.IQueue.providedBy(ob):
+            ob = None
+        return ob
+
+    @property
+    def agent(self):
+        ob = self.parent
+        while (ob is not None and
+               zc.async.interfaces.IJob.providedBy(ob)):
+            ob = ob.parent
+        if not zc.async.interfaces.IAgent.providedBy(ob):
+            ob = None
+        return ob
+
+    @property
+    def result(self):
+        return self._result
+
+    @property
+    def status(self):
+        # NEW -> (PENDING -> ASSIGNED ->) ACTIVE -> CALLBACKS -> COMPLETED
+        if self._status == zc.async.interfaces.NEW:
+            ob = self.parent
+            while (ob is not None and
+                   zc.async.interfaces.IJob.providedBy(ob)):
+                ob = ob.parent
+            if zc.async.interfaces.IAgent.providedBy(ob):
+                return zc.async.interfaces.ASSIGNED
+            elif zc.async.interfaces.IQueue.providedBy(ob):
+                return zc.async.interfaces.PENDING
+        return self._status
+
+    @classmethod
+    def bind(klass, *args, **kwargs):
+        res = klass(*args, **kwargs)
+        res.args.insert(0, res)
+        return res
+
+    def __repr__(self):
+        try:
+            call = _repr(self._callable_root)
+            if self._callable_name is not None:
+                call += ' :' + self._callable_name
+            args = ', '.join(_repr(a) for a in self.args)
+            kwargs = ', '.join(k+"="+_repr(v) for k, v in self.kwargs.items())
+            if args:
+                if kwargs:
+                    args += ", " + kwargs
+            else:
+                args = kwargs
+            return '<%s ``%s(%s)``>' % (_repr(self), call, args)
+        except (TypeError, ValueError, AttributeError):
+            # broken reprs are a bad idea; they obscure problems
+            return super(Job, self).__repr__()
+
+    @property
+    def callable(self):
+        if self._callable_name is None:
+            return self._callable_root
+        else:
+            return getattr(self._callable_root, self._callable_name)
+    @rwproperty.setproperty
+    def callable(self, value):
+        # can't pickle/persist methods by default as of this writing, so we
+        # add the sugar ourselves
+        if self._status != zc.async.interfaces.NEW:
+            raise zc.async.interfaces.BadStatusError(
+                'can only set callable when a job has NEW, PENDING, or '
+                'ASSIGNED status')
+        if isinstance(value, types.MethodType):
+            self._callable_root = value.im_self
+            self._callable_name = value.__name__
+        elif isinstance(value, zc.twist.METHOD_WRAPPER_TYPE):
+            self._callable_root = zc.twist.get_self(value)
+            self._callable_name = value.__name__
+        else:
+            self._callable_root, self._callable_name = value, None
+
+    def addCallbacks(self, success=None, failure=None):
+        if success is not None or failure is not None:
+            if success is not None:
+                success = zc.async.interfaces.IJob(success)
+            if failure is not None:
+                failure = zc.async.interfaces.IJob(failure)
+            res = Job(success_or_failure, success, failure)
+            if success is not None:
+                success.parent = res
+            if failure is not None:
+                failure.parent = res
+            self.addCallback(res)
+            # we need to handle the case of callbacks on the internal success/
+            # failure jobs, to be safe.
+            abort_handler = zc.async.interfaces.IJob(
+                completeStartedJobArguments)
+            abort_handler.args.append(res)
+            res.addCallback(abort_handler)
+        else:
+            res = self
+        return res
+
+    def addCallback(self, callback):
+        callback = zc.async.interfaces.IJob(callback)
+        self.callbacks.put(callback)
+        callback.parent = self
+        if self._status == zc.async.interfaces.COMPLETED:
+            callback(self.result) # this commits transactions!
+        else:
+            self._p_changed = True # to try and fire conflict errors if
+            # our reading of self.status has changed beneath us
+        return callback
+
+    def __call__(self, *args, **kwargs):
+        if self.status not in (zc.async.interfaces.NEW,
+                               zc.async.interfaces.ASSIGNED):
+            raise zc.async.interfaces.BadStatusError(
+                'can only call a job with NEW or ASSIGNED status')
+        tm = transaction.interfaces.ITransactionManager(self)
+        self._status = zc.async.interfaces.ACTIVE
+        self._active_start = datetime.datetime.now(pytz.UTC)
+        tm.commit()
+        effective_args = list(args)
+        effective_args[0:0] = self.args
+        effective_kwargs = dict(self.kwargs)
+        effective_kwargs.update(kwargs)
+        return self._call_with_retry(
+            lambda: self.callable(*effective_args, **effective_kwargs))
+
+    def _call_with_retry(self, call):
+        ct = 0
+        tm = transaction.interfaces.ITransactionManager(self)
+        res = None
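+        # retry the call on ZODB transaction conflicts, up to five
+        # attempts; other exceptions (except EXPLOSIVE_ERRORS) are stored
+        # as a Failure result.  See jobs_and_transactions.txt.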
+        while 1:
+            try:
+                res = call()
+                if zc.async.interfaces.IJob.providedBy(res):
+                    res.addCallback(self._callback)
+                    tm.commit()
+                elif isinstance(res, twisted.internet.defer.Deferred):
+                    res.addBoth(zc.twist.Partial(self._callback))
+                    tm.commit()
+                else:
+                    res = self._complete(res, tm)
+            except ZODB.POSException.TransactionError:
+                tm.abort()
+                ct += 1
+                if ct >= 5:
+                    res = self._complete(zc.twist.Failure(), tm)
+                    self.resumeCallbacks()
+                else:
+                    continue
+            except zc.twist.EXPLOSIVE_ERRORS:
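+                # per jobs_and_transactions.txt, these (SystemExit,
+                # KeyboardInterrupt, non-TransactionError POSErrors) abort
+                # and re-raise rather than being stored as a Failure.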
+                tm.abort()
+                raise
+            except:
+                tm.abort()
+                res = self._complete(zc.twist.Failure(), tm)
+                self.resumeCallbacks()
+            else:
+                if self._status == zc.async.interfaces.CALLBACKS:
+                    self.resumeCallbacks()
+            return res
+
+    def _callback(self, res):
+        self._call_with_retry(lambda: res)
+
+    def _complete(self, res, tm):
+        if isinstance(res, twisted.python.failure.Failure):
+            res = zc.twist.sanitize(res)
+        self._result = res
+        self._status = zc.async.interfaces.CALLBACKS
+        self._active_end = datetime.datetime.now(pytz.UTC)
+        tm.commit()
+        return res
+
+    def fail(self, e=None):
+        if e is None:
+            e = zc.async.interfaces.AbortedError()
+        if self._status not in (zc.async.interfaces.NEW,
+                                zc.async.interfaces.ACTIVE):
+            raise zc.async.interfaces.BadStatusError(
+                'can only call fail on a job with NEW, PENDING, ASSIGNED, or '
+                'ACTIVE status')
+        self._complete(zc.twist.Failure(e),
+                       transaction.interfaces.ITransactionManager(self))
+        self.resumeCallbacks()
+
+    def resumeCallbacks(self):
+        if self._status != zc.async.interfaces.CALLBACKS:
+            raise zc.async.interfaces.BadStatusError(
+                'can only resumeCallbacks on a job with CALLBACKS status')
+        callbacks = list(self.callbacks)
+        tm = transaction.interfaces.ITransactionManager(self)
+        length = 0
+        while 1:
+            for j in callbacks:
+                if j._status == zc.async.interfaces.NEW:
+                    j(self.result)
+                elif j._status == zc.async.interfaces.ACTIVE:
+                    j.fail()
+                elif j._status == zc.async.interfaces.CALLBACKS:
+                    j.resumeCallbacks()
+                # TODO: this shouldn't raise anything we want to catch, right?
+                # now, this should catch all the errors except EXPLOSIVE_ERRORS
+                # cleaning up dead jobs should look something like the above.
+            tm.commit()
+            tm.begin() # syncs
+            # it's possible that someone added some callbacks; run until
+            # we're exhausted.
+            length += len(callbacks)
+            callbacks = list(self.callbacks)[length:]
+            if not callbacks:
+                try:
+                    self._status = zc.async.interfaces.COMPLETED
+                    if zc.async.interfaces.IAgent.providedBy(self.parent):
+                        self.parent.jobCompleted(self)
+                    tm.commit()
+                except ZODB.POSException.TransactionError:
+                    tm.abort()
+                    callbacks = list(self.callbacks)[length:]
+                else:
+                    break # and return
+

Copied: zc.async/trunk/src/zc/async/job.txt (from rev 85211, zc.async/branches/dev/src/zc/async/job.txt)
===================================================================
--- zc.async/trunk/src/zc/async/job.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/job.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,921 @@
+====
+Jobs
+====
+
+What if you want to persist a reference to the method of a persistent
+object--you can't persist that normally in the ZODB, but that can be
+very useful, especially to store asynchronous calls.  What if you want
+to act on the result of an asynchronous call that may be called later?
+The zc.async package offers an approach that combines the ideas of a
+partial and of Twisted deferred code: ``zc.async.job.Job``.
+
+To use it, simply wrap the callable--a method of a persistent object or
+a callable persistent object or a global function--in the job.  You can
+include ordered and keyword arguments to the job, which may be
+persistent objects or simply pickleable objects.
+
+Unlike a partial but like a Twisted deferred, the result of the wrapped
+call goes on the job's ``result`` attribute, and the immediate return of
+the call might not be the job's end result.  It could also be a failure,
+indicating an exception; or another job, indicating that we are
+waiting to be called back by the second job; or a Twisted deferred,
+indicating that we are waiting to be called back by a Twisted Deferred
+(see the ``zc.twist`` package).  After you have the job, you can then use
+a number of methods and attributes on the job for further set up.
+Let's show the most basic use first, though.
+
+Note that, even though this looks like an interactive prompt, all
+functions and classes defined in this document act as if they were
+defined within a module.  Classes and functions defined in an interactive
+prompt are normally not picklable, and Jobs must work with
+picklable objects [#set_up]_.
+
+    >>> import zc.async.job
+    >>> def call():
+    ...     print 'hello world'
+    ...     return 'my result'
+    ...
+    >>> j = root['j'] = zc.async.job.Job(call)
+    >>> import transaction
+    >>> transaction.commit()
+
+Now we have a job [#verify]_.  The __repr__ tries to be helpful, identifying
+the persistent object identifier ("oid") in hex and the database ("db"), and
+trying to render the call.
+
+    >>> j # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ... db 'unnamed') ``zc.async.doctest_test.call()``>
+
+Initially it has a NEW status.
+
+    >>> import zc.async.interfaces
+    >>> j.status == zc.async.interfaces.NEW
+    True
+
+We can call the job from the NEW (or ASSIGNED, see later) status, and
+then see that the function was called, and see the result on the job.
+
+    >>> res = j()
+    hello world
+    >>> j.result
+    'my result'
+    >>> j.status == zc.async.interfaces.COMPLETED
+    True
+
+The result of the job also happens to be the end result of the call,
+but as mentioned above, the job may return a deferred or another job.
+
+    >>> res
+    'my result'
+
+In addition to using a global function, we can also use a method of a
+persistent object.  Imagine we have a ZODB root that we can put objects
+in to.
+
+    >>> import persistent
+    >>> class Demo(persistent.Persistent):
+    ...     counter = 0
+    ...     def increase(self, value=1):
+    ...         self.counter += value
+    ...
+    >>> demo = root['demo'] = Demo()
+    >>> demo.counter
+    0
+    >>> j = root['j'] = zc.async.job.Job(demo.increase)
+    >>> j # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    <zc.async.job.Job (oid ?, db ?)
+     ``zc.async.doctest_test.Demo (oid ?, db ?) :increase()``>
+
+    >>> transaction.commit()
+    >>> j # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    <zc.async.job.Job (oid ..., db 'unnamed')
+     ``zc.async.doctest_test.Demo (oid ..., db 'unnamed') :increase()``>
+    >>> j() # result is None
+    >>> demo.counter
+    1
+
+So our two calls so far have returned direct successes.  This one returns
+a failure, because the wrapped call raises an exception.
+
+    >>> def callFailure():
+    ...     raise RuntimeError('Bad Things Happened Here')
+    ...
+    >>> j = root['j'] = zc.async.job.Job(callFailure)
+    >>> transaction.commit()
+    >>> res = j()
+    >>> j.result
+    <zc.twist.Failure exceptions.RuntimeError>
+
+These are standard twisted Failures, except that frames in the stored
+traceback have been converted to reprs, so that we don't keep references
+around when we pass the Failures around (over ZEO, for instance)
+[#no_live_frames]_.  This doesn't stop us from getting nice tracebacks,
+though.
+
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    exceptions.RuntimeError: Bad Things Happened Here
+
+Note that all calls can return a failure explicitly, rather than raising
+an exception that the job converts to a failure.  However, there
+is an important difference in behavior.  If a wrapped call raises an
+exception, the job aborts the transaction; but if the wrapped call
+returns a failure, no abort occurs.  Wrapped calls that explicitly return
+failures are thus responsible for any necessary transaction aborts.  See
+the footnote for an example [#explicit_failure_example]_.
+
+Now let's return a job from the job.  This generally represents a result
+that is waiting on another asynchronous persistent call, which would
+normally be called by a worker thread in a dispatcher.  We'll fire the
+second call ourselves for this demonstration.
+
+    >>> def innerCall():
+    ...     return 42
+    ...
+    >>> ij = root['ij'] = zc.async.job.Job(innerCall)
+    >>> def callJob():
+    ...     return ij
+    ...
+    >>> j = root['j'] = zc.async.job.Job(callJob)
+    >>> transaction.commit()
+    >>> res = j()
+    >>> res is ij
+    True
+
+While we are waiting for the result, the status is ACTIVE.
+
+    >>> j.status == zc.async.interfaces.ACTIVE
+    True
+
+When we call the inner job, the result will be placed on the outer job.
+
+    >>> j.result # None
+    >>> res = ij()
+    >>> j.result
+    42
+    >>> j.status == zc.async.interfaces.COMPLETED
+    True
+
+This is accomplished with callbacks, discussed below in the Callbacks_
+section.
+
+Now we'll return a Twisted deferred.  The story is almost identical to
+the inner job story, except that, in our demonstration, we must handle
+transactions, because the deferred story uses the ``zc.twist`` package
+to let the Twisted reactor communicate safely with the ZODB: see
+the package's README for details.
+
+    >>> import twisted.internet.defer
+    >>> inner_d = twisted.internet.defer.Deferred()
+    >>> def callDeferred():
+    ...     return inner_d
+    ...
+    >>> j = root['j2'] = zc.async.job.Job(callDeferred)
+    >>> transaction.commit()
+    >>> res = j()
+    >>> res is inner_d
+    True
+    >>> j.status == zc.async.interfaces.ACTIVE
+    True
+    >>> j.result # None
+
+After the deferred receives its result, we need to sync our connection to see
+it.
+
+    >>> inner_d.callback(42)
+    >>> j.result # still None; we need to sync our connection to see the result
+    >>> j.status == zc.async.interfaces.ACTIVE # it's completed, but need to sync
+    True
+    >>> trans = transaction.begin() # sync our connection
+    >>> j.result
+    42
+    >>> j.status == zc.async.interfaces.COMPLETED
+    True
+
+As the last step in looking at the basics, let's look at passing arguments
+into the job.  They can be persistent objects or generally picklable
+objects, and they can be ordered or keyword arguments.
+
+    >>> class PersistentDemo(persistent.Persistent):
+    ...     def __init__(self, value=0):
+    ...         self.value = value
+    ...
+    >>> root['demo2'] = PersistentDemo()
+    >>> import operator
+    >>> def argCall(ob, ob2=None, value=0, op=operator.add):
+    ...     for o in (ob, ob2):
+    ...         if o is not None:
+    ...             o.value = op(o.value, value)
+    ...
+    >>> j = root['j3'] = zc.async.job.Job(
+    ...     argCall, root['demo2'], value=4)
+    >>> transaction.commit()
+    >>> j # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    <zc.async.job.Job (oid ..., db 'unnamed')
+     ``zc.async.doctest_test.argCall(zc.async.doctest_test.PersistentDemo (oid ..., db 'unnamed'),
+                                     value=4)``>
+    >>> j()
+    >>> root['demo2'].value
+    4
+
+And, of course, this job acts as a partial: we can specify some
+arguments when the job is made, and some when it is called.
+
+    >>> root['demo3'] = PersistentDemo(10)
+    >>> j = root['j3'] = zc.async.job.Job(
+    ...     argCall, root['demo2'], value=4)
+    >>> transaction.commit()
+    >>> j(root['demo3'], op=operator.mul)
+    >>> root['demo2'].value
+    16
+    >>> root['demo3'].value
+    40
+
+This last feature makes it possible to use jobs as callbacks: our next
+topic.
+
+Callbacks
+---------
+
+The job object can also be used to handle return values and
+exceptions from the call.  The ``addCallbacks`` method provides this
+functionality.  Its signature is (success=None, failure=None).  It may
+be called multiple times, each time adding a success and/or failure
+callable that takes an end result: a value or a zc.twist.Failure object,
+respectively.  Failure objects are passed to failure callables, and
+any other results are passed to success callables.
+
+The return value of the success and failure callables is
+important for chains and for determining whether a job had any
+errors that need logging, as we'll see below.  The call to
+``addCallbacks`` returns a job, which can be used for chaining (see
+`Chaining Callbacks`_).
+
+Let's look at a simple example.
+
+    >>> def call(*args):
+    ...     res = 1
+    ...     for a in args:
+    ...         res *= a
+    ...     return res
+    ...
+    >>> def callback(res):
+    ...     return 'the result is %r' % (res,)
+    ...
+    >>> j = root['j4'] = zc.async.job.Job(call, 2, 3)
+    >>> j_callback = j.addCallbacks(callback)
+    >>> transaction.commit()
+    >>> res = j(4)
+    >>> j.result
+    24
+    >>> res
+    24
+    >>> j_callback.result
+    'the result is 24'
+
+Here are some callback examples adding a success and a failure
+simultaneously.  This one causes a success...
+
+    >>> def multiply(first, second, third=None):
+    ...     res = first * second
+    ...     if third is not None:
+    ...         res *= third
+    ...     return res
+    ...
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 3)
+    >>> transaction.commit()
+    >>> def success(res):
+    ...     print "success!", res
+    ...
+    >>> def failure(f):
+    ...     print "failure.", f
+    ...
+    >>> j.addCallbacks(success, failure) # doctest: +ELLIPSIS
+    <zc.async.job.Job ...>
+    >>> res = j()
+    success! 15
+
+...and this one a failure.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, None)
+    >>> transaction.commit()
+    >>> j.addCallbacks(success, failure) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j() # doctest: +ELLIPSIS
+    failure. [Failure instance: Traceback: exceptions.TypeError...]
+
+You can also add multiple callbacks.
+
+    >>> def also_success(val):
+    ...     print "also a success!", val
+    ...
+    >>> def also_failure(f):
+    ...     print "also a failure.", f
+    ...
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 3)
+    >>> transaction.commit()
+    >>> j.addCallbacks(success) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> j.addCallbacks(also_success) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j()
+    success! 15
+    also a success! 15
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, None)
+    >>> transaction.commit()
+    >>> j.addCallbacks(failure=failure) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> j.addCallbacks(failure=also_failure) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j() # doctest: +ELLIPSIS
+    failure. [Failure instance: Traceback: exceptions.TypeError...]
+    also a failure. [Failure instance: Traceback: exceptions.TypeError...]
+
+Chaining Callbacks
+------------------
+
+Sometimes it's desirable to have a chain of callables, so that the result
+of one callable feeds the input of another.  The job returned from
+``addCallbacks`` can be used for that purpose.  Effectively, the logic for
+``addCallbacks`` is this:
+
+    def success_or_failure(success, failure, res):
+        if zc.async.interfaces.IFailure.providedBy(res):
+            if failure is not None:
+                res = failure(res)
+        elif success is not None:
+            res = success(res)
+        return res
+
+    class Job(...):
+        ...
+        def addCallbacks(self, success=None, failure=None):
+            if success is None and failure is None:
+                return
+            res = Job(success_or_failure, success, failure)
+            self.callbacks.append(res)
+            return res
+
+Here's a simple chain, then.  We multiply 5 * 3, then that result by 4, then
+print the result in the ``success`` function.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 3)
+    >>> transaction.commit()
+    >>> j.addCallbacks(zc.async.job.Job(multiply, 4)
+    ...               ).addCallbacks(success) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j()
+    success! 60
+
+A less artificial use case is to handle errors (like try...except) or do
+cleanup (like try...finally).  Here's an example of handling errors.
+
+    >>> def handle_failure(f):
+    ...     return 0
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, None)
+    >>> transaction.commit()
+    >>> j.addCallbacks(
+    ...     failure=handle_failure).addCallbacks(success) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j()
+    success! 0
+
+    >>> isinstance(j.result, twisted.python.failure.Failure)
+    True
+
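+The cleanup case (the try...finally analogue) works the same way.  As a
+minimal illustrative sketch (``cleanup`` is a hypothetical callable, not
+part of the package), passing the same callable as both the success and
+the failure callback runs it regardless of the outcome:
+
+    >>> def cleanup(res):
+    ...     print "cleaning up"
+    ...     return res
+    ...
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, None)
+    >>> transaction.commit()
+    >>> j.addCallbacks(cleanup, cleanup) # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+    >>> res = j()
+    cleaning up
+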
+Callbacks on Completed Job
+--------------------------
+
+When you add a callback to a job that has been completed, it is performed
+immediately.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> transaction.commit()
+    >>> res = j()
+    >>> j.result
+    10
+    >>> j.status == zc.async.interfaces.COMPLETED
+    True
+    >>> j_callback = j.addCallbacks(zc.async.job.Job(multiply, 3))
+    >>> j_callback.result
+    30
+    >>> j.status == zc.async.interfaces.COMPLETED
+    True
+
+Chaining Jobs
+-------------
+
+It's also possible to achieve a somewhat similar pattern by using a
+job as a success or failure callable, and then add callbacks to the
+second job.  This differs from the other approach in that you are only
+adding callbacks to one side, success or failure, not the effective
+combined result; and errors are nested in arguments.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 3)
+    >>> transaction.commit()
+    >>> j_callback = j.addCallbacks(success)
+    >>> j2 = zc.async.job.Job(multiply, 4)
+    >>> j_callback_2 = j.addCallbacks(j2)
+    >>> j_callback_3 = j2.addCallbacks(also_success)
+    >>> res = j()
+    success! 15
+    also a success! 60
+
+Failing
+-------
+
+Speaking again of failures, it's worth discussing two other aspects of
+failing.  One is that jobs offer an explicit way to fail a call.  It
+can be called when the job has a NEW, PENDING, ASSIGNED or ACTIVE status. 
+The primary use cases for this method are to cancel a job that is
+overdue to start, and to cancel a job that was in progress by a
+worker thread in a dispatcher when the dispatcher died (more on that below).
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> transaction.commit()
+    >>> j.fail()
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    zc.async.interfaces.AbortedError:
+
+``fail`` calls all failure callbacks with the failure.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> j_callback = j.addCallbacks(failure=failure)
+    >>> transaction.commit()
+    >>> res = j.fail() # doctest: +ELLIPSIS
+    failure. [Failure instance: Traceback...zc.async.interfaces.AbortedError...]
+
+As seen above, it fails with zc.async.interfaces.AbortedError by default.
+You can also pass in a different error.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> transaction.commit()
+    >>> j.fail(RuntimeError('failed'))
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    exceptions.RuntimeError: failed
+
+As mentioned, if a dispatcher dies when working on an active task, the
+active task should be aborted using ``fail``, so the method also works if
+a job has the ACTIVE status.  We'll reach under the covers to show this.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> j._status = zc.async.interfaces.ACTIVE
+    >>> transaction.commit()
+    >>> j.fail()
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    zc.async.interfaces.AbortedError:
+
+It won't work for failing tasks in COMPLETED or CALLBACKS status.
+
+    >>> j.fail()
+    Traceback (most recent call last):
+    ...
+    BadStatusError: can only call fail on a job with NEW, PENDING, ASSIGNED, or ACTIVE status
+    >>> j._status = zc.async.interfaces.CALLBACKS
+    >>> j.fail()
+    Traceback (most recent call last):
+    ...
+    BadStatusError: can only call fail on a job with NEW, PENDING, ASSIGNED, or ACTIVE status
+
+Using ``resumeCallbacks``
+-------------------------
+
+So ``fail`` is the proper way to handle an active job that was being
+worked on by one of a dead dispatcher's worker threads, but how does one
+handle a job that was in the CALLBACKS status?  The answer is to use
+``resumeCallbacks``.  Any callback job that is still pending will be
+called; any that is active will be failed; any that is in the middle of
+calling its own callbacks will have its ``resumeCallbacks`` called; and
+any that is completed will be ignored.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 2)
+    >>> j._result = 10
+    >>> j._status = zc.async.interfaces.CALLBACKS
+    >>> completed_j = zc.async.job.Job(multiply, 3)
+    >>> callbacks_j = zc.async.job.Job(multiply, 4)
+    >>> callbacks_j._result = 40
+    >>> callbacks_j._status = zc.async.interfaces.CALLBACKS
+    >>> sub_callbacks_j = callbacks_j.addCallbacks(
+    ...     zc.async.job.Job(multiply, 2))
+    >>> active_j = zc.async.job.Job(multiply, 5)
+    >>> active_j._status = zc.async.interfaces.ACTIVE
+    >>> pending_j = zc.async.job.Job(multiply, 6)
+    >>> for _j in completed_j, callbacks_j, active_j, pending_j:
+    ...     j.callbacks.put(_j)
+    ...
+    >>> transaction.commit()
+    >>> res = completed_j(10)
+    >>> j.resumeCallbacks()
+    >>> sub_callbacks_j.result
+    80
+    >>> sub_callbacks_j.status == zc.async.interfaces.COMPLETED
+    True
+    >>> print active_j.result.getTraceback()
+    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    zc.async.interfaces.AbortedError:
+    >>> active_j.status == zc.async.interfaces.COMPLETED
+    True
+    >>> pending_j.result
+    60
+    >>> pending_j.status == zc.async.interfaces.COMPLETED
+    True
+
+Introspecting and Mutating Arguments
+------------------------------------
+
+Job arguments can be introspected and mutated.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 5, 3)
+    >>> transaction.commit()
+    >>> j.args
+    [5, 3]
+    >>> j.kwargs
+    {}
+    >>> j.kwargs['third'] = 2
+    >>> j()
+    30
+
+This can allow wrapped callables to have a reference to the job
+itself.
+
+    >>> def show(v):
+    ...     print v
+    ...
+    >>> j = root['j'] = zc.async.job.Job(show)
+    >>> transaction.commit()
+    >>> j.args.append(j)
+    >>> res = j() # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
+
+A class method on Job, ``bind``, can simplify this.  It puts the job as
+the first argument to the callable, as if the callable were bound as a method
+on the job.
+
+    >>> j = root['j'] = zc.async.job.Job.bind(show)
+    >>> transaction.commit()
+    >>> res = j() # doctest: +ELLIPSIS
+    <zc.async.job.Job (oid ...>
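+
+Because the wrapped callable receives the job itself, it can use any of
+the job's attributes; for instance, its ``annotations`` mapping.  Here is
+a minimal sketch (the ``note`` key is purely illustrative, not a package
+convention):
+
+    >>> def checkAnnotations(job):
+    ...     print job.annotations.get('note', 'no note')
+    ...
+    >>> j = root['j'] = zc.async.job.Job.bind(checkAnnotations)
+    >>> j.annotations['note'] = 'hello from annotations'
+    >>> transaction.commit()
+    >>> res = j()
+    hello from annotations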
+
+Result and Status
+-----------------
+
+Jobs know about their status, and after a successful call also know
+their result, whether it is a Failure or another value.  Possible
+statuses are the constants in zc.async.interfaces named NEW, PENDING,
+ASSIGNED, ACTIVE, CALLBACKS, and COMPLETED.
+
+Without the rest of zc.async, the status values are simply NEW, ACTIVE,
+CALLBACKS, and COMPLETED.
+
+    >>> def showStatus(job, *ignore):
+    ...     status = job.status
+    ...     for nm in ('NEW', 'PENDING', 'ASSIGNED', 'ACTIVE', 'CALLBACKS',
+    ...                'COMPLETED'):
+    ...         val = getattr(zc.async.interfaces, nm)
+    ...         if status == val:
+    ...             print nm
+    ...
+    >>> j = root['j'] = zc.async.job.Job.bind(showStatus)
+    >>> transaction.commit()
+    >>> j_callback = j.addCallbacks(zc.async.job.Job(showStatus, j))
+
+    >>> showStatus(j)
+    NEW
+    >>> j.result # None
+    >>> res = j()
+    ACTIVE
+    CALLBACKS
+    >>> showStatus(j)
+    COMPLETED
+
+Setting the ``parent`` attribute to a queue changes the status to PENDING,
+and setting it to an agent changes the status to ASSIGNED.  In this case,
+the common status flow should be as follows: NEW -> PENDING -> ASSIGNED ->
+ACTIVE -> CALLBACKS -> COMPLETED.  Here's the same example as above,
+along with setting the ``parent`` to change the status.
+
+    >>> j = root['j'] = zc.async.job.Job.bind(showStatus)
+    >>> transaction.commit()
+    >>> j_callback = j.addCallbacks(zc.async.job.Job(showStatus, j))
+
+    >>> showStatus(j)
+    NEW
+
+    >>> print j.queue
+    None
+    >>> print j.agent
+    None
+    >>> import zc.async.interfaces
+    >>> import zope.interface
+    >>> import zc.async.utils
+    >>> import datetime
+    >>> import pytz
+    >>> class StubQueue:
+    ...     zope.interface.implements(zc.async.interfaces.IQueue)
+    ...
+    >>> class StubDispatcherAgents:
+    ...     zope.interface.implements(zc.async.interfaces.IDispatcherAgents)
+    ...
+    >>> class StubAgent:
+    ...     zope.interface.implements(zc.async.interfaces.IAgent)
+    ...     def jobCompleted(self, job):
+    ...         job.key = zc.async.utils.dt_to_long(
+    ...             datetime.datetime.now(pytz.UTC))
+    ...
+    >>> queue = StubQueue()
+    >>> dispatcheragents = StubDispatcherAgents()
+    >>> agent = StubAgent()
+    >>> agent.parent = dispatcheragents
+    >>> dispatcheragents.parent = queue
+    >>> j.parent = queue
+    >>> j.queue is queue
+    True
+    >>> j.status == zc.async.interfaces.PENDING
+    True
+    >>> j.parent = agent
+    >>> j.queue is queue
+    True
+    >>> j.agent is agent
+    True
+    >>> j.status == zc.async.interfaces.ASSIGNED
+    True
+
+    >>> j.result # None
+    >>> res = j()
+    ACTIVE
+    CALLBACKS
+    >>> showStatus(j)
+    COMPLETED
+
+A job may only be called when the status is NEW or ASSIGNED: calling a
+job again raises a BadStatusError.
+
+    >>> j()
+    Traceback (most recent call last):
+    ...
+    BadStatusError: can only call a job with NEW or ASSIGNED status
+
+Other similar restrictions include the following:
+
+- A job may not call itself [#call_self]_.
+
+- Also, a job's direct callback may not call the job
+  [#callback_self]_.
+
+More Job Introspection
+----------------------
+
+We've already shown that it is possible to introspect status, result,
+args, and kwargs.  Two other aspects of the basic job functionality are
+introspectable: callable and callbacks.
+
+The callable is the callable (function or method of a picklable object) that
+the job will call.  You can change it while the job is in a pending
+status.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 2)
+    >>> j.callable is multiply
+    True
+    >>> j.callable = root['demo'].increase
+    >>> j.callable == root['demo'].increase
+    True
+    >>> transaction.commit()
+    >>> root['demo'].counter
+    2
+    >>> res = j()
+    >>> root['demo'].counter
+    4
+
+The callbacks are a queue of the callbacks added by addCallbacks (or the
+currently experimental and underdocumented addCallback).  Currently the
+code may allow direct mutation of the callbacks, but it is strongly
+suggested that you do not mutate them, and in particular that you add
+callbacks only through addCallbacks or addCallback.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 2, 8)
+    >>> len(j.callbacks)
+    0
+    >>> j_callback = j.addCallbacks(zc.async.job.Job(multiply, 5))
+    >>> len(j.callbacks)
+    1
+
+When you use ``addCallbacks``, the job you get back has a callable with
+the success and failure jobs you passed in as arguments.  Moreover, the
+job you get back already has a callback, for safety reasons.  If a
+dispatcher dies while the job is in progress, active argument jobs
+should be cleaned up, but will not be cleaned up automatically by the
+logic in ``resumeCallbacks`` (by design: this may not be desired behavior
+in all cases).  Therefore we add a callback to the main callback that
+performs this cleanup.
+
+    >>> j.callbacks[0] is j_callback
+    True
+    >>> len(j_callback.callbacks)
+    1
+
+``addCallback`` does not have this characteristic (you are therefore
+responsible for any internal jobs).
+
+    >>> j_callback2 = zc.async.job.Job(multiply, 9)
+    >>> j_callback2 is j.addCallback(j_callback2)
+    True
+
+To continue with our example of introspecting the job...
+
+    >>> len(j.callbacks)
+    2
+    >>> j.callbacks[1] is j_callback2
+    True
+    >>> transaction.commit()
+    >>> res = j()
+    >>> j.result
+    16
+    >>> j_callback.result
+    80
+    >>> j_callback2.result
+    144
+    >>> len(j.callbacks)
+    2
+    >>> j.callbacks[0] is j_callback
+    True
+    >>> j.callbacks[1] is j_callback2
+    True
+
+The ``parent`` attribute should hold the immediate parent of a job. This
+means that a pending job will be within a queue; an assigned and active
+non-callback job will be within an agent's queue (which is within an
+IDispatcherAgents collection, which is within a queue); and a callback
+will be within another job (which may be intermediate to the top
+level job, in which case the parent of the intermediate job is
+the top level).  Here's an example.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 3, 5)
+    >>> j_callback = zc.async.job.Job(multiply, 2)
+    >>> j_callback2 = j.addCallbacks(j_callback)
+    >>> j_callback.parent is j_callback2
+    True
+    >>> j_callback2.parent is j
+    True
+    >>> transaction.abort()
+
+=========
+Footnotes
+=========
+
+.. [#set_up] We'll actually create the state that the text needs here.
+    One thing to notice is that the ``zc.async.configure.base`` registers
+    the Job class as an adapter from functions and methods.
+
+    >>> from ZODB.tests.util import DB
+    >>> db = DB()
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+.. [#verify] Verify interface
+
+    >>> from zope.interface.verify import verifyObject
+    >>> verifyObject(zc.async.interfaces.IJob, j)
+    True
+    
+    Note that status and result are readonly.
+    
+    >>> j.status = 1
+    Traceback (most recent call last):
+    ...
+    AttributeError: can't set attribute
+    >>> j.result = 1
+    Traceback (most recent call last):
+    ...
+    AttributeError: can't set attribute
+
+.. [#no_live_frames] Failures have two particularly dangerous bits: the
+    traceback and the stack.  We use the __getstate__ code on Failures
+    to clean them up.  This makes the traceback (``tb``) None...
+    
+    >>> j.result.tb # None
+    
+    ...and it makes all of the values in the stack--the locals and
+    globals-- into strings.  The stack is a list of lists, in which each
+    internal list represents a frame, and contains five elements: the
+    code name (``f_code.co_name``), the code file (``f_code.co_filename``),
+    the line number (``f_lineno``), an items list of the locals, and an
+    items list for the globals.  All of the values in the items list
+    would normally be objects, but are now strings.
+    
+    >>> for (codename, filename, lineno, local_i, global_i) in j.result.stack:
+    ...     for k, v in local_i:
+    ...         assert isinstance(v, basestring), 'bad local %s' % (v,)
+    ...     for k, v in global_i:
+    ...         assert isinstance(v, basestring), 'bad global %s' % (v,)
+    ...
+    
+    Here's a reasonable question.  The Twisted Failure code has a
+    __getstate__ that cleans up the failure, and that's even what we are
+    using to sanitize the failure.  If the failure is attached to a
+    job and stored in the ZODB, it is going to be cleaned up anyway.
+    Why explicitly clean up the failure even before it is pickled?
+
+    The answer might be classified as paranoia.  Just in case the failure
+    is kept around in memory longer--by being put on a deferred, or somehow
+    otherwise passed around--we want to eliminate any references to objects
+    in the connection as soon as possible.
+
+    Unfortunately, the __getstate__ code in the Twisted Failure can cause
+    some interaction problems for code that has a __repr__ with side effects--
+    like xmlrpclib, unfortunately.  The ``zc.twist`` package has a monkeypatch
+    for that particular problem, thanks to Florent Guillaume at Nuxeo, but
+    others may be discovered.
+
+.. [#explicit_failure_example] As the main text describes, if a call raises
+    an exception, the job will abort the transaction; but if it
+    returns a failure explicitly, the call is responsible for making any
+    desired changes to the transaction (such as aborting) before the
+    job calls commit.  Compare.  Here is a call that raises an
+    exception, and rolls back changes.
+    
+    (Note that we are passing arguments to the job, a topic that has
+    not yet been discussed in the text when this footnote is given: read
+    on a bit in the main text to see the details, if it seems surprising
+    or confusing.)
+
+    >>> def callAndRaise(ob):
+    ...     ob.increase()
+    ...     print ob.counter
+    ...     raise RuntimeError
+    ...
+    >>> j = root['raise_exception_example'] = zc.async.job.Job(
+    ...     callAndRaise, root['demo'])
+    >>> transaction.commit()
+    >>> root['demo'].counter
+    1
+    >>> res = j() # shows the result of the print in ``callAndRaise`` above.
+    2
+    >>> root['demo'].counter # it was rolled back
+    1
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    exceptions.RuntimeError:
+
+    Here is a call that returns a failure, and does not abort, even though
+    the job result looks very similar.
+
+    >>> import twisted.python.failure
+    >>> def returnExplicitFailure(ob):
+    ...     ob.increase()
+    ...     try:
+    ...         raise RuntimeError
+    ...     except RuntimeError:
+    ...         # we could have just made and returned a failure without the
+    ...         # try/except, but this is intended to make crystal clear that
+    ...         # exceptions are irrelevant if you catch them and return a
+    ...         # failure
+    ...         return twisted.python.failure.Failure()
+    ...
+    >>> j = root['explicit_failure_example'] = zc.async.job.Job(
+    ...     returnExplicitFailure, root['demo'])
+    >>> transaction.commit()
+    >>> res = j()
+    >>> root['demo'].counter # it was not rolled back automatically
+    2
+    >>> j.result
+    <zc.twist.Failure exceptions.RuntimeError>
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    exceptions.RuntimeError:
+
+.. [#call_self] Here's a job trying to call itself.
+
+    >>> def call(obj, *ignore):
+    ...     return obj()
+    ...
+    >>> j = root['j'] = zc.async.job.Job.bind(call)
+    >>> transaction.commit()
+    >>> res = j()
+    >>> print j.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+    Traceback (most recent call last):
+    ...
+    zc.async.interfaces.BadStatusError: can only call a job with NEW or ASSIGNED status
+
+.. [#callback_self] Here's a job's callback trying to call the job.
+
+    >>> j = root['j'] = zc.async.job.Job(multiply, 3, 4)
+    >>> j_callback = j.addCallbacks(
+    ...     zc.async.job.Job(call, j)).addCallbacks(failure=failure)
+    >>> transaction.commit()
+    >>> res = j() # doctest: +ELLIPSIS
+    failure. [Failure instance: Traceback: zc.async.interfaces.BadStatusError...]
+    >>> j.result # the main job still ran to completion
+    12

Copied: zc.async/trunk/src/zc/async/jobs_and_transactions.txt (from rev 85211, zc.async/branches/dev/src/zc/async/jobs_and_transactions.txt)
===================================================================
--- zc.async/trunk/src/zc/async/jobs_and_transactions.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/jobs_and_transactions.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,268 @@
+This is a document for maintainers and for testing.
+
+Jobs manage their own transactions when they are called.  In normal
+use, this means that transactions are committed and aborted by the
+job itself at the points marked "COMMIT" and "ABORT" in this list
+(other software components will make commits, just not the job):
+
+- client creates a job, puts it in a queue, and assigns callbacks to it
+  before it is run.
+- an agent claims a job
+- a dispatcher calls a job for the agent in a thread
+- job changes status to ACTIVE: COMMIT
+- job runs the wrapped callable, stores the result on its "result"
+  attribute, changes the status to CALLBACKS, and tries to COMMIT.
+  * if there is a ZODB.POSException.TransactionError, abort and retry 5
+    times, after which ABORT, set a Failure on the result attribute,
+    COMMIT, and skip to `complete`_ step below.
+  * if there is a SystemExit, KeyboardInterrupt, or any non-TransactionError
+    ZODB.POSException.POSError (which includes all ZEO-related storage
+    errors), ABORT and raise.
+  * if there are any other exceptions, ABORT, set a Failure on the result
+    attribute, COMMIT, and skip to `complete`_ step below.
+- If the result of the wrapped callable is a job or a Twisted deferred,
+  add a callback to that result which sets the result on this job, sets
+  the status to CALLBACKS, tries to commit as described above, and then
+  proceeds with the `complete`_ step.  COMMIT and return.
+- _`complete`: for each callback (which is itself a job), call it.
+  Each callback job will commit as described here.  The top job
+  catches no errors while it runs the callbacks.
+- When all callbacks have been called, set the status to COMPLETED and
+  COMMIT.  If there is a ZODB.POSException.TransactionError, look in the
+  callbacks to see if a new one has been added; if so, perform it and try
+  again.  Otherwise, retry forever, logging on every retry: there should
+  be no conflict errors here, because no two workers should be touching
+  this job, so the only expected cause is a newly added callback.
+- If a callback is added to this completed job, perform the callback
+  and COMMIT.  If anything fails, including a ConflictError, just raise it.
+  Someone else should abort as necessary.
+- If a callback is added to a job in any other status, set the job's
+  _p_changed to True and commit, so that a concurrent status change
+  raises a ConflictError; on conflict, check the status again and retry.
+
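+Schematically, the wrapped-callable portion of this protocol looks
+something like the following (a rough, hypothetical sketch, not the actual
+implementation: ``job.result`` and ``job.status`` stand in for the real
+attributes, and the job/deferred result case is omitted)::
+
+    import transaction
+    import twisted.python.failure
+    import ZODB.POSException
+    import zc.async.interfaces
+
+    def run_wrapped(job, call):
+        retries = 0
+        while 1:
+            try:
+                job.result = call()
+                job.status = zc.async.interfaces.CALLBACKS
+                transaction.commit()
+            except ZODB.POSException.TransactionError:
+                # conflicts and the like: ABORT and retry up to five times
+                transaction.abort()
+                retries += 1
+                if retries < 5:
+                    continue
+                job.result = twisted.python.failure.Failure()
+                job.status = zc.async.interfaces.CALLBACKS
+                transaction.commit()
+            except (SystemExit, KeyboardInterrupt,
+                    ZODB.POSException.POSError):
+                # process-level exits and non-TransactionError POSErrors:
+                # ABORT and re-raise for someone above us to handle
+                transaction.abort()
+                raise
+            except:
+                # any other exception: ABORT, store a Failure, COMMIT
+                transaction.abort()
+                job.result = twisted.python.failure.Failure()
+                job.status = zc.async.interfaces.CALLBACKS
+                transaction.commit()
+            break # on to the `complete`_ step: run the callbacks
+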
+Note the following:
+
+- if a job's wrapped callable returns a failure, that means that it
+  is taking responsibility for any necessary abort: the job will still
+  attempt to commit.
+- the status never changes out of COMPLETED even when a new callback is
+  added.
+- __call__ *can* raise a ConflictError; the only known way is to have two
+  workers start the same job, which should not be possible in normal
+  zc.async usage.
+- addCallbacks may raise a ConflictError: this would happen, for instance,
+  when status is COMPLETED so callbacks are performed immediately.
+
+What could go wrong?  In this list "T1" stands for one hypothetical
+thread, and "T2" stands for another hypothetical thread, often
+overlapping in time with T1.
+
+- T1 goes to CALLBACKS status and begins evaluating callbacks.  T2 adds another
+  callback [#set_up]_.  We need to be careful that the callback is executed.
+
+    >>> import threading
+    >>> _thread_lock = threading.Lock()
+    >>> _main_lock = threading.Lock()
+    >>> called = 0
+    >>> def safe_release(lock):
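+    ...     # busy-wait until the lock is actually held, then release it,
+    ...     # unblocking whichever thread is waiting in ``acquire``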
+    ...     while not lock.locked():
+    ...         pass
+    ...     lock.release()
+    ...
+    >>> def locked_call(res=None):
+    ...     global called
+    ...     safe_release(_main_lock)
+    ...     _thread_lock.acquire()
+    ...     called += 1
+    ...
+    >>> def call_from_thread(j):
+    ...     id = j._p_oid
+    ...     def call():
+    ...         conn = db.open()
+    ...         j = conn.get(id)
+    ...         j()
+    ...     return call
+    ...
+    >>> _thread_lock.acquire()
+    True
+    >>> _main_lock.acquire()
+    True
+    >>> import zc.async.job
+    >>> root['j'] = j = zc.async.job.Job(locked_call)
+    >>> j2 = j.addCallbacks(locked_call)
+    >>> import transaction
+    >>> transaction.commit()
+    >>> t = threading.Thread(target=call_from_thread(j))
+    >>> t.start()
+    >>> _main_lock.acquire()
+    True
+    >>> called
+    0
+    >>> trans = transaction.begin()
+    >>> j.status == zc.async.interfaces.ACTIVE
+    True
+    >>> safe_release(_thread_lock)
+    >>> _main_lock.acquire()
+    True
+    >>> called # the main call
+    1
+    >>> trans = transaction.begin()
+    >>> j.status == zc.async.interfaces.CALLBACKS
+    True
+    >>> j3 = j.addCallbacks(locked_call)
+    >>> transaction.commit()
+    >>> safe_release(_thread_lock)
+    >>> _main_lock.acquire()
+    True
+    >>> called # call back number one
+    2
+    >>> safe_release(_thread_lock)
+    >>> safe_release(_thread_lock)
+    >>> while t.isAlive():
+    ...     pass
+    ...
+    >>> called # call back number two
+    ...        # (added while first callback was in progress)
+    3
+    >>> _main_lock.release()
+
+- T1 goes to CALLBACKS status.  In the split second between checking for
+  any remaining callbacks and changing status to COMPLETED, T2 adds a
+  callback and commits.  T1 commits.  T2 thinks that callbacks are still
+  being processed, so does not process the callback, but meanwhile the
+  status is being switched to COMPLETED, and the new callback is never
+  run.  We could turn off MVCC to address this, but we would rather not,
+  for efficiency's sake.  A better solution is to set _p_changed on the
+  job in T2 and commit; if there is a conflict error, re-get the status,
+  because a change to it may have caused the conflict.
+
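+  In the test below, ``LockedSetter`` is a data descriptor that pauses the
+  worker thread at the moment it tries to set the status to COMPLETED, so
+  that the main thread can add a callback in exactly that window.
+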
+    >>> class LockedSetter(object):
+    ...     def __init__(self, name, condition, initial=None):
+    ...         self.name = name
+    ...         self.condition = condition
+    ...         self.value = initial
+    ...     def __get__(self, obj, typ=None):
+    ...         if obj is None:
+    ...             return self
+    ...         return getattr(obj, '_z_locked_' + self.name, self.value)
+    ...     def __set__(self, obj, value):
+    ...         if self.condition(obj, value):
+    ...             safe_release(_main_lock)
+    ...             _thread_lock.acquire()
+    ...         setattr(obj, '_z_locked_' + self.name, value)
+    ...
+    >>> import zc.async.job
+    >>> class Job(zc.async.job.Job):
+    ...     _status = LockedSetter(
+    ...         '_status',
+    ...         lambda o, v: v == zc.async.interfaces.COMPLETED,
+    ...         zc.async.interfaces.NEW)
+    ...
+    >>> called = 0
+    >>> def call(res=None):
+    ...     global called
+    ...     called += 1
+    ...
+    >>> root['j2'] = j = Job(call)
+    >>> transaction.commit()
+    >>> _thread_lock.acquire()
+    True
+    >>> _main_lock.acquire()
+    True
+    >>> t = threading.Thread(target=call_from_thread(j))
+    >>> t.start()
+    >>> _main_lock.acquire()
+    True
+    >>> trans = transaction.begin()
+    >>> called
+    1
+    >>> j.status == zc.async.interfaces.CALLBACKS
+    True
+    >>> j2 = j.addCallbacks(call)
+    >>> transaction.commit()
+    >>> safe_release(_thread_lock)
+    >>> _main_lock.acquire()
+    True
+    >>> trans = transaction.begin()
+    >>> called
+    2
+    >>> j.status == zc.async.interfaces.CALLBACKS
+    True
+    >>> safe_release(_thread_lock)
+    >>> safe_release(_thread_lock)
+    >>> while t.isAlive():
+    ...     pass
+    ...
+    >>> _main_lock.release()
+
+  Note, because of this, addCallbacks can raise a ConflictError: it probably
+  means that the status changed out from under it.  Just retry.
+
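+  For example, a caller might retry like this (a hypothetical sketch:
+  ``j`` and ``some_callback`` are stand-ins)::
+
+    import transaction
+    import ZODB.POSException
+
+    while 1:
+        try:
+            callback_job = j.addCallbacks(some_callback)
+            transaction.commit()
+        except ZODB.POSException.ConflictError:
+            transaction.abort() # the retry will see the refreshed status
+        else:
+            break
+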
+- T1 is performing callbacks.  T2 begins and adds a callback.  T1 changes status
+  to COMPLETED and commits.  T2 commits.  If we don't handle it carefully,
+  the callback is never called.  So we handle it carefully.
+
+    >>> _thread_lock.acquire()
+    True
+    >>> _main_lock.acquire()
+    True
+    >>> called = 0
+    >>> root['j3'] = j = zc.async.job.Job(call)
+    >>> j1 = j.addCallbacks(locked_call)
+    >>> transaction.commit()
+    >>> t = threading.Thread(target=call_from_thread(j))
+    >>> t.start()
+    >>> _main_lock.acquire()
+    True
+    >>> called
+    1
+    >>> trans = transaction.begin()
+    >>> def call_and_unlock(res):
+    ...     global called
+    ...     called += 1
+    ...
+    >>> j2 = j.addCallbacks(call_and_unlock)
+    >>> safe_release(_thread_lock)
+    >>> safe_release(_thread_lock)
+    >>> while t.isAlive():
+    ...     pass
+    ...
+    >>> called # the main call
+    2
+    >>> transaction.commit() # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    ConflictError: database conflict error (..., class zc.async.job.Job)
+    >>> transaction.abort()
+    >>> j2 = j.addCallbacks(call_and_unlock)
+    >>> called
+    3
+    >>> transaction.commit()
+    >>> _main_lock.release()
+
+- T1 adds a callback to a job in COMPLETED status, which immediately runs
+  the callback.  Simultaneously, T2 adds a callback to the same job.  No
+  problem: each thread runs the callback it added.
+
+- two workers might claim and start the same job.  This should already
+  be prevented by workers committing the transaction in which they claim
+  a job.  This is considered to be a pathological case.
+
+- Generally, if a worker is determined to be dead, and its jobs are
+  handed out to other workers, but the worker is actually alive, this can
+  be a serious problem.  This is also considered to be a pathological case.
+
+=========
+Footnotes
+=========
+
+.. [#set_up] We'll actually create the state that the text needs here.
+
+    >>> from ZODB.tests.util import DB
+    >>> db = DB()
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
\ No newline at end of file

Copied: zc.async/trunk/src/zc/async/monitor.py (from rev 85211, zc.async/branches/dev/src/zc/async/monitor.py)
===================================================================
--- zc.async/trunk/src/zc/async/monitor.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/monitor.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,291 @@
+import re
+import datetime
+import pytz
+import uuid
+import simplejson
+
+import zope.component
+
+import zc.async.dispatcher
+import zc.async.interfaces
+
+# The commands below take a ``uuid`` string argument that shadows the
+# ``uuid`` module, so keep a module-level alias for parsing it.
+_uuid = uuid
+
+_marker = object()
+class Encoder(simplejson.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, datetime.timedelta):
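+            # render a duration as a dict of its nonzero units; 'seconds'
+            # carries the fractional part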
+            tmp = {'days': obj.days,
+                   'hours': obj.seconds // (60*60),
+                   'minutes': (obj.seconds % (60*60)) // 60,
+                   'seconds': float(
+                        obj.seconds % 60) + obj.microseconds/1000000.0
+                  }
+            res = dict((k, v) for k, v in tmp.items() if v)
+            if not res:
+                res['seconds'] = 0.0
+            return res
+        # TODO the spelling of this conditional is to support our test setup
+        # shenanigans.  Originally it was ``isinstance(obj, datetime.datetime)``.
+        # It would be nice to fix, though the duck typing is Pythonic at least.
+        elif (getattr(obj, 'tzinfo', _marker) is not _marker and
+              getattr(obj, 'astimezone', _marker) is not _marker):
+            if obj.tzinfo is not None:
+                obj = obj.astimezone(pytz.UTC).replace(tzinfo=None)
+            return obj.isoformat() + "Z"
+        elif isinstance(obj, uuid.UUID):
+            return str(obj)
+        return simplejson.JSONEncoder.default(self, obj)
+
+encoder = Encoder(sort_keys=True, indent=4) 
+
+
+def status(uuid=None):
+    """Get general zc.async dispatcher information.
+    
+    'status' is one of 'STUCK', 'STARTING', 'RUNNING', or 'STOPPED'."""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    return encoder.encode(zc.async.dispatcher.get(uuid).getStatusInfo())
+
+def jobs(queue=None, agent=None, uuid=None):
+    """Show active jobs as of last poll, sorted from newest to oldest.
+    
+    Usage:
+
+        jobs
+        (returns active jobs as of last poll, newest to oldest)
+
+        jobs queue:<queue name>
+        (jobs are filtered to those coming from the named queue)
+        
+        jobs agent:<agent name>
+        (jobs are filtered to those coming from agents with given name)
+
+    "queue:" and "agent:" modifiers may be combined.
+
+    Example:
+
+        async jobs queue: agent:main
+        (results filtered to queue named '' and agent named 'main')"""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    return encoder.encode(
+        zc.async.dispatcher.get(uuid).getActiveJobIds(queue, agent))
+
+def job(OID, database=None, uuid=None):
+    """Local information about a job as of last poll, if known.
+
+    Does not consult ZODB, but in-memory information."""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    return encoder.encode(
+        zc.async.dispatcher.get(uuid).getJobInfo(OID, database))
+
+_find = re.compile(r'\d+[DHMS]').findall
+def _dt(s):
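+    # None passes through; a string of digits is treated as a poll key;
+    # anything else is parsed as an interval such as ``1H30M`` and
+    # combined with the current UTC time for the dispatcher to resolve.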
+    if s is None:
+        res = s
+    else:
+        try:
+            res = int(s)
+        except ValueError:
+            vals = {}
+            for val in _find(s.upper()):
+                vals[val[-1]] = int(val[:-1])
+            res = datetime.timedelta(
+                days=vals.get('D', 0),
+                hours=vals.get('H', 0),
+                minutes=vals.get('M', 0),
+                seconds=vals.get('S', 0)) + datetime.datetime.utcnow()
+    return res
+                
+
+def jobstats(at=None, before=None, since=None, queue=None, agent=None,
+             uuid=None):
+    """Statistics on historical jobs as of last poll.
+    
+    Usage:
+
+        jobstats
+        (returns statistics on historical jobs as of last poll)
+
+        jobstats queue:<queue name>
+        (statistics are filtered to those coming from the named queue)
+
+        jobstats agent:<agent name>
+        (statistics are filtered to those coming from agents with given name)
+
+        jobstats at:<poll key or interval>
+        (statistics are collected at or before the poll key or interval)
+
+        jobstats before:<pollkey or interval>
+        (statistics are collected before the poll key or interval)
+
+        jobstats since:<pollkey or interval>
+        (statistics are collected since poll key or interval, inclusive)
+
+    The modifiers "queue:", "agent:", "since:", and one of "at:" or "before:"
+    may be combined.
+
+    Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+    be replaced with a positive integer, and "D," "H," "M," and "S" are
+    literals standing for "days," "hours," "minutes," and "seconds." 
+    For instance, you might use ``5M`` for five minutes, ``20S`` for
+    twenty seconds, or ``1H30M`` for an hour and a half.
+    
+    Poll keys are the values shown as "key" from the ``poll`` or ``polls``
+    command.
+
+    Example:
+
+        async jobstats queue: agent:main since:1H
+        (results filtered to queue named '' and agent named 'main' from now
+         till one hour ago)"""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    return encoder.encode(
+        zc.async.dispatcher.get(uuid).getStatistics(
+            _dt(at), _dt(before), _dt(since), queue, agent))
+
+def poll(at=None, before=None, uuid=None):
+    """Get information about a single poll, defaulting to most recent.
+    
+    Usage:
+    
+        poll
+        (returns most recent poll)
+        
+        poll at:<poll key or interval>
+        (returns poll at or before the poll key or interval)
+        
+        poll before:<poll key or interval>
+        (returns poll before the poll key or interval)
+
+    Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+    be replaced with a positive integer, and "D," "H," "M," and "S" are
+    literals standing for "days," "hours," "minutes," and "seconds." 
+    For instance, you might use ``5M`` for five minutes, ``20S`` for
+    twenty seconds, or ``1H30M`` for an hour and a half.
+    
+    Example:
+    
+        async poll at:5M
+        (get the poll information at five minutes ago or before)"""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    info = zc.async.dispatcher.get(uuid).getPollInfo(_dt(at), _dt(before))
+    res = {'key': info.key, 'time': info.utc_timestamp.isoformat() + "Z",
+           'results': info}
+    return encoder.encode(res)
+
+def polls(at=None, before=None, since=None, count=None, uuid=None):
+    """Get information about recent polls, defaulting to most recent.
+    
+    Usage:
+    
+        polls
+        (returns the most recent 3 polls)
+        
+        polls at:<poll key or interval>
+        (returns up to 3 polls at or before the poll key or interval)
+        
+        polls before:<poll key or interval>
+        (returns up to 3 polls before the poll key or interval)
+        
+        polls since:<poll key or interval>
+        (returns polls since the poll key or interval, inclusive)
+        
+        polls count:<positive integer>
+        (returns the given number of the most recent polls)
+
+    The modifiers "since:", "count:", and one of "at:" or "before:" may
+    be combined.
+
+    Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+    be replaced with a positive integer, and "D," "H," "M," and "S" are
+    literals standing for "days," "hours," "minutes," and "seconds." 
+    For instance, you might use ``5M`` for five minutes, ``20S`` for
+    twenty seconds, or ``1H30M`` for an hour and a half.
+    
+    Example:
+    
+        async polls before:5M since:10M
+        (get the poll information from 5 to 10 minutes ago)"""
+    if uuid is not None:
+        uuid = _uuid.UUID(uuid)
+    if count is None:
+        if since is None:
+            count = 3
+    else:
+        count = int(count)
+    return encoder.encode(
+        [{'key': p.key, 'time': p.utc_timestamp.isoformat() + "Z",
+          'results': p}
+         for p in zc.async.dispatcher.get(uuid).iterPolls(
+            _dt(at), _dt(before), _dt(since), count)])
+
+# provide in async and separately:
+
+def utcnow():
+    """Return the current time in UTC, in ISO 8601 format."""
+    return datetime.datetime.utcnow().isoformat() + "Z"
+
+def UUID():
+    """Get instance UUID in hex."""
+    res = zope.component.getUtility(zc.async.interfaces.IUUID)
+    if res is not None:
+        return str(res)
+
+funcs = {}
+
+def help(cmd=None):
+    """Get help on an async monitor tool.
+    
+    Usage is 'async help <tool name>' or 'async help'."""
+    if cmd is None:
+        res = [
+            "These are the tools available.  Usage for each tool is \n"
+            "'async <tool name> [modifiers...]'.  Learn more about each \n"
+            "tool using 'async help <tool name>'.\n"]
+        for nm, func in sorted(funcs.items()):
+            res.append('%s: %s' % (
+                nm, func.__doc__.split('\n', 1)[0]))
+        return '\n'.join(res)
+    f = funcs.get(cmd)
+    if f is None:
+        return 'Unknown async tool'
+    return f.__doc__
+
+for f in status, jobs, job, jobstats, poll, polls, utcnow, UUID, help:
+    funcs[f.__name__] = f
+
+def async(connection, cmd=None, *raw):
+    """A collection of tools to monitor zc.async activity in this process.
+    
+    To see a list of async tools, use 'async help'.
+    
+    To learn more about an async monitor tool, use 'async help <tool name>'."""
+    if cmd is None:
+        res = async.__doc__
+    else:
+        f = funcs.get(cmd)
+        if f is None:
+            res = '[Unknown async tool]'
+        else:
+            args = []
+            kwargs = {}
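+            # tokens of the form "name:value" become keyword arguments;
+            # bare tokens are positional and must precede named modifiers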
+            for val in raw:
+                if ':' in val:
+                    key, val = val.split(':', 1)
+                    kwargs[key] = val
+                else:
+                    if kwargs:
+                        raise ValueError(
+                            'positional modifiers must come before named '
+                            'modifiers')
+                    args.append(val)
+            res = f(*args, **kwargs)
+    connection.write(res)
+    connection.write('\n')
+

Copied: zc.async/trunk/src/zc/async/monitor.txt (from rev 85211, zc.async/branches/dev/src/zc/async/monitor.txt)
===================================================================
--- zc.async/trunk/src/zc/async/monitor.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/monitor.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,353 @@
+Monitoring Dispatchers
+======================
+
+A process's zc.async dispatcher [#setUp]_ can be monitored in-process via
+zc.z3monitor plugins.  Let's imagine we have a connection over which we
+can send text messages to the monitor server [#z3monitor_setup]_.
+
+All monitoring is done through the ``async`` command.  Here is its
+description, using the zc.z3monitor ``help`` command.
+
+    >>> connection.test_input('help async\n')
+    Help for async:
+    <BLANKLINE>
+    A collection of tools to monitor zc.async activity in this process.
+    <BLANKLINE>
+        To see a list of async tools, use 'async help'.
+    <BLANKLINE>
+        To learn more about an async monitor tool, use 'async help <tool name>'.
+    -> CLOSE
+
+As you can see, you use ``async help`` to get more information about each
+async-specific command.
+
+    >>> connection.test_input('async help\n')
+    These are the tools available.  Usage for each tool is 
+    'async <tool name> [modifiers...]'.  Learn more about each 
+    tool using 'async help <tool name>'.
+    <BLANKLINE>
+    UUID: Get instance UUID in hex.
+    help: Get help on an async monitor tool.
+    job: Local information about a job as of last poll, if known.
+    jobs: Show active jobs as of last poll, sorted from newest to oldest.
+    jobstats: Statistics on historical jobs as of last poll.
+    poll: Get information about a single poll, defaulting to most recent.
+    polls: Get information about recent polls, defaulting to most recent.
+    status: Get general zc.async dispatcher information.
+    utcnow: Return the current time in UTC, in ISO 8601 format. 
+    -> CLOSE
+
+Let's give a quick run through these for an overview, and then we'll dig in
+just a bit.
+
+The ``UUID`` command returns the instance's UUID.
+
+    >>> connection.test_input('async help UUID\n')
+    Get instance UUID in hex. 
+    -> CLOSE
+
+    >>> connection.test_input('async UUID\n')
+    d10f43dc-ffdf-11dc-abd4-0017f2c49bdd 
+    -> CLOSE
+
+The ``utcnow`` command returns the current time in UTC.  This can be
+convenient to decipher the meaning of UTC datetimes returned from other
+commands.
+
+    >>> connection.test_input('async help utcnow\n')
+    Return the current time in UTC, in ISO 8601 format. 
+    -> CLOSE
+
+    >>> connection.test_input('async utcnow\n')
+    2006-08-10T15:44:23.000211Z 
+    -> CLOSE
+
+The ``status`` command is the first of the "serious" monitoring
+commands.  As such, it starts some patterns that the rest of the
+commands will follow.
+
+- output is pretty-printed JSON
+
+- durations are rendered as a dict with keys 'days', 'hours', 'minutes', and
+  'seconds'; all values are ints except 'seconds', which is a float.  Keys
+  with zero values are omitted.
+
+    >>> connection.test_input('async help status\n')
+    Get general zc.async dispatcher information.
+    <BLANKLINE>
+        'status' is one of 'STUCK', 'STARTING', 'RUNNING', or 'STOPPED'. 
+    -> CLOSE
+
+    >>> connection.test_input('async status\n')
+    {
+        "poll interval": {
+            "seconds": 5.0
+        }, 
+        "status": "RUNNING", 
+        "time since last poll": {
+            "seconds": 1.0
+        }, 
+        "uptime": {
+            "seconds": 1.0
+        }, 
+        "uuid": "d10f43dc-ffdf-11dc-abd4-0017f2c49bdd"
+    } 
+    -> CLOSE
+
+Here's the ``jobs`` command.  It introduces some new patterns.
+
+- some command modifiers are available as <modifier>:<value>
+
+- several commands have the "queue:" and "agent:" modifiers.
+
+    >>> connection.test_input('async help jobs\n')
+    Show active jobs as of last poll, sorted from newest to oldest.
+    <BLANKLINE>
+        Usage:
+    <BLANKLINE>
+            jobs
+            (returns active jobs as of last poll, newest to oldest)
+    <BLANKLINE>
+            jobs queue:<queue name>
+            (jobs are filtered to those coming from the named queue)
+    <BLANKLINE>
+            jobs agent:<agent name>
+            (jobs are filtered to those coming from agents with given name)
+    <BLANKLINE>
+        "queue:" and "agent:" modifiers may be combined.
+    <BLANKLINE>
+        Example:
+    <BLANKLINE>
+            async jobs queue: agent:main
+            (results filtered to queue named '' and agent named 'main') 
+    -> CLOSE
+
+    >>> connection.test_input('async jobs\n')
+    [] 
+    -> CLOSE
+
+The ``jobstats`` command analyzes past polls and job information to produce
+some potentially useful statistics.  It accepts the optional "queue:" and
+"agent:" modifiers.  It also introduces some new patterns.
+
+- datetimes are in UTC, in ISO 8601 format.
+
+- The "at:", "before:" and "since:" modifiers are intervals, or poll keys.
+
+- "at:" and "before:" may not be combined.
+
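+As a quick, informal illustration of the interval grammar, here is the
+private tokenizer the parser uses (this peeks at an implementation detail,
+so treat it as illustrative only).
+
+    >>> zc.async.monitor._find('1H30M')
+    ['1H', '30M']
+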
+    >>> connection.test_input('async help jobstats\n')
+    Statistics on historical jobs as of last poll.
+    <BLANKLINE>
+        Usage:
+    <BLANKLINE>
+            jobstats
+            (returns statistics on historical jobs as of last poll)
+    <BLANKLINE>
+            jobstats queue:<queue name>
+            (statistics are filtered to those coming from the named queue)
+    <BLANKLINE>
+            jobstats agent:<agent name>
+            (statistics are filtered to those coming from agents with given name)
+    <BLANKLINE>
+            jobstats at:<poll key or interval>
+            (statistics are collected at or before the poll key or interval)
+    <BLANKLINE>
+            jobstats before:<pollkey or interval>
+            (statistics are collected before the poll key or interval)
+    <BLANKLINE>
+            jobstats since:<pollkey or interval>
+            (statistics are collected since poll key or interval, inclusive)
+    <BLANKLINE>
+        The modifiers "queue:", "agent:", "since:", and one of "at:" or "before:"
+        may be combined.
+    <BLANKLINE>
+        Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+        be replaced with a positive integer, and "D," "H," "M," and "S" are
+        literals standing for "days," "hours," "minutes," and "seconds." 
+        For instance, you might use ``5M`` for five minutes, ``20S`` for
+        twenty seconds, or ``1H30M`` for an hour and a half.
+    <BLANKLINE>
+        Poll keys are the values shown as "key" from the ``poll`` or ``polls``
+        command.
+    <BLANKLINE>
+        Example:
+    <BLANKLINE>
+            async jobstats queue: agent:main since:1H
+            (results filtered to queue named '' and agent named 'main' from now
+             till one hour ago) 
+    -> CLOSE
+
+    >>> connection.test_input('async jobstats\n')
+    {
+        "failed": 0, 
+        "longest active": null, 
+        "longest failed": null, 
+        "longest successful": null, 
+        "shortest active": null, 
+        "shortest failed": null, 
+        "shortest successful": null, 
+        "started": 0, 
+        "statistics end": "2006-08-10T15:44:22.000211Z", 
+        "statistics start": "2006-08-10T15:44:22.000211Z", 
+        "successful": 0, 
+        "unknown": 0
+    } 
+    -> CLOSE
+
+The ``poll`` command uses patterns we've seen above.
+
+    >>> connection.test_input('async help poll\n')
+    Get information about a single poll, defaulting to most recent.
+    <BLANKLINE>
+        Usage:
+    <BLANKLINE>    
+            poll
+            (returns most recent poll)
+    <BLANKLINE>
+            poll at:<poll key or interval>
+            (returns poll at or before the poll key or interval)
+    <BLANKLINE>
+            poll before:<poll key or interval>
+            (returns poll before the poll key or interval)
+    <BLANKLINE>
+        Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+        be replaced with a positive integer, and "D," "H," "M," and "S" are
+        literals standing for "days," "hours," "minutes," and "seconds." 
+        For instance, you might use ``5M`` for five minutes, ``20S`` for
+        twenty seconds, or ``1H30M`` for an hour and a half.
+    <BLANKLINE>
+        Example:
+    <BLANKLINE>
+            async poll at:5M
+            (get the poll information at five minutes ago or before) 
+    -> CLOSE
+
+    >>> connection.test_input('async poll\n')
+    {
+        "key": 6420106068108777167, 
+        "results": {
+            "": {}
+        }, 
+        "time": "2006-08-10T15:44:22.000211Z"
+    } 
+    -> CLOSE
+
+``polls`` does too.
+
+    >>> connection.test_input('async help polls\n')
+    Get information about recent polls, defaulting to most recent.
+    <BLANKLINE>
+        Usage:
+    <BLANKLINE>
+            polls
+            (returns the most recent 3 polls)
+    <BLANKLINE>
+            polls at:<poll key or interval>
+            (returns up to 3 polls at or before the poll key or interval)
+    <BLANKLINE>
+            polls before:<poll key or interval>
+            (returns up to 3 polls before the poll key or interval)
+    <BLANKLINE>
+            polls since:<poll key or interval>
+            (returns polls since the poll key or interval, inclusive)
+    <BLANKLINE>
+            polls count:<positive integer>
+            (returns the given number of the most recent polls)
+    <BLANKLINE>
+        The modifiers "since:", "count:", and one of "at:" or "before:" may
+        be combined.
+    <BLANKLINE>
+        Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
+        be replaced with a positive integer, and "D," "H," "M," and "S" are
+        literals standing for "days," "hours," "minutes," and "seconds." 
+        For instance, you might use ``5M`` for five minutes, ``20S`` for
+        twenty seconds, or ``1H30M`` for an hour and a half.
+    <BLANKLINE>
+        Example:
+    <BLANKLINE>
+            async polls before:5M since:10M
+            (get the poll information from 5 to 10 minutes ago) 
+    -> CLOSE
+
+    >>> connection.test_input('async polls\n')
+    [
+        {
+            "key": 6420106068108777167, 
+            "results": {
+                "": {}
+            }, 
+            "time": "2006-08-10T15:44:22.000211Z"
+        }
+    ] 
+    -> CLOSE
+
+.. [#setUp] See the discussion in other documentation for an explanation of
+    this code.
+
+    >>> import ZODB.FileStorage
+    >>> storage = ZODB.FileStorage.FileStorage(
+    ...     'zc_async.fs', create=True)
+    >>> from ZODB.DB import DB 
+    >>> db = DB(storage) 
+    >>> conn = db.open()
+    >>> root = conn.root()
+
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+    >>> import zc.async.testing
+    >>> reactor = zc.async.testing.Reactor()
+    >>> reactor.start() # this monkeypatches datetime.datetime.now
+
+    >>> import zc.async.queue
+    >>> import zc.async.interfaces
+    >>> mapping = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
+    >>> queue = mapping[''] = zc.async.queue.Queue()
+    >>> import transaction
+    >>> transaction.commit()
+
+    >>> import zc.async.dispatcher
+    >>> dispatcher = zc.async.dispatcher.Dispatcher(db, reactor)
+    >>> dispatcher.activate()
+    >>> reactor.time_flies(1)
+    1
+
+    >>> import zc.async.agent
+    >>> agent = zc.async.agent.Agent()
+    >>> queue.dispatchers[dispatcher.UUID]['main'] = agent
+    >>> transaction.commit()
+
+    >>> import time
+    >>> def wait_for(*jobs, **kwargs):
+    ...     reactor.time_flies(dispatcher.poll_interval) # starts thread
+    ...     # now we wait for the thread
+    ...     for i in range(kwargs.get('attempts', 10)):
+    ...         while reactor.time_passes():
+    ...             pass
+    ...         transaction.begin()
+    ...         for j in jobs:
+    ...             if j.status != zc.async.interfaces.COMPLETED:
+    ...                 break
+    ...         else:
+    ...             break
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         print 'TIME OUT'
+    ...
+
+.. [#z3monitor_setup] This part actually sets up the monitoring.
+
+    >>> import zc.ngi.testing
+    >>> import zc.z3monitor
+
+    >>> connection = zc.ngi.testing.TextConnection()
+    >>> server = zc.z3monitor.Server(connection)
+
+    >>> import zc.async.monitor
+    >>> import zope.component
+    >>> import zc.z3monitor.interfaces
+    >>> zope.component.provideUtility(
+    ...     zc.async.monitor.async,
+    ...     zc.z3monitor.interfaces.IZ3MonitorPlugin,
+    ...     'async')
+    >>> zope.component.provideUtility(zc.z3monitor.help,
+    ...     zc.z3monitor.interfaces.IZ3MonitorPlugin, 'help')

Copied: zc.async/trunk/src/zc/async/multidb_dispatcher_policy.zcml (from rev 85211, zc.async/branches/dev/src/zc/async/multidb_dispatcher_policy.zcml)
===================================================================
--- zc.async/trunk/src/zc/async/multidb_dispatcher_policy.zcml	                        (rev 0)
+++ zc.async/trunk/src/zc/async/multidb_dispatcher_policy.zcml	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configure xmlns="http://namespaces.zope.org/zope">
+    <include file="dispatcher.zcml" />
+    <subscriber handler=".subscribers.multidb_queue_installer" />
+    <subscriber handler=".subscribers.threaded_dispatcher_installer" />
+    <subscriber handler=".subscribers.agent_installer" />
+    <adapter factory="zc.async.queue.getDefaultQueue" />
+</configure>

Deleted: zc.async/trunk/src/zc/async/partial.py
===================================================================
--- zc.async/trunk/src/zc/async/partial.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/partial.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,246 +0,0 @@
-
-import types
-
-import BTrees.OOBTree
-import ZODB.POSException
-import transaction.interfaces
-import persistent
-import persistent.list
-import persistent.mapping
-import twisted.internet.defer
-import twisted.python.failure
-import zope.interface
-import zc.queue
-
-import zc.async.interfaces
-import zc.twist
-from zc.async import rwproperty
-
-def success_or_failure(success, failure, res):
-    callable = None
-    if isinstance(res, twisted.python.failure.Failure):
-        if failure is not None:
-            callable = failure
-    elif success is not None:
-        callable = success
-    if callable is None:
-        return res
-    return callable(res)
-
-def completeStartedPartialArguments(partial, result):
-    if isinstance(result, twisted.python.failure.Failure):
-        for collection in (partial.args, partial.kwargs.values()):
-            for a in collection:
-                if (zc.async.interfaces.IPartial.providedBy(a) and
-                    a.state not in (
-                        zc.async.interfaces.PENDING,
-                        zc.async.interfaces.COMPLETED)):
-                    if a.state == zc.async.interfaces.ACTIVE:
-                        a.fail()
-                    elif a.state == zc.async.interfaces.CALLBACKS:
-                        a.resumeCallbacks()
-    return result
-
-class Partial(persistent.Persistent):
-
-    zope.interface.implements(zc.async.interfaces.IPartial)
-    zope.interface.classProvides(zc.async.interfaces.IPartialFactory)
-
-    __parent__ = _callable_root = _callable_name = _result = None
-    _state = zc.async.interfaces.PENDING
-
-    def __init__(self, *args, **kwargs):
-        self.args = persistent.list.PersistentList(args)
-        self.callable = self.args.pop(0)
-        self.kwargs = persistent.mapping.PersistentMapping(kwargs)
-        self.callbacks = zc.queue.PersistentQueue()
-        self.annotations = BTrees.OOBTree.OOBTree()
-
-    @property
-    def result(self):
-        return self._result
-
-    @property
-    def state(self):
-        return self._state
-
-    @property
-    def unhandled_error(self):
-        if (self.state in (zc.async.interfaces.COMPLETED,
-                           zc.async.interfaces.CALLBACKS) and
-            isinstance(self.result, twisted.python.failure.Failure)):
-            ct = 0
-            for c in self.callbacks:
-                if (c.state not in (zc.async.interfaces.COMPLETED,
-                                    zc.async.interfaces.CALLBACKS) or
-                    c.unhandled_error):
-                    return True
-                ct += 1
-            if not ct:
-                return True
-        return False
-
-    @classmethod
-    def bind(klass, *args, **kwargs):
-        res = klass(*args, **kwargs)
-        res.args.insert(0, res)
-        return res
-
-    @property
-    def callable(self):
-        if self._callable_name is None:
-            return self._callable_root
-        else:
-            return getattr(self._callable_root, self._callable_name)
-    @rwproperty.setproperty
-    def callable(self, value):
-        # can't pickle/persist methods by default as of this writing, so we
-        # add the sugar ourselves
-        if self.state != zc.async.interfaces.PENDING:
-            raise zc.async.interfaces.BadStateError(
-                'can only set callable when a partial is in PENDING state')
-        if isinstance(value, types.MethodType):
-            self._callable_root = value.im_self
-            self._callable_name = value.__name__
-        else:
-            self._callable_root, self._callable_name = value, None
-
-    def addCallbacks(self, success=None, failure=None):
-        if success is not None or failure is not None:
-            if success is not None:
-                success = zc.async.interfaces.IPartial(success)
-            if failure is not None:
-                failure = zc.async.interfaces.IPartial(failure)
-            res = Partial(success_or_failure, success, failure)
-            if success is not None:
-                success.__parent__ = res
-            if failure is not None:
-                failure.__parent__ = res
-            self.addCallback(res)
-            # we need to handle the case of callbacks on the internal success/
-            # failure partials, to be safe.
-            abort_handler = zc.async.interfaces.IPartial(
-                completeStartedPartialArguments)
-            abort_handler.args.append(res)
-            res = res.addCallback(abort_handler)
-        else:
-            res = self
-        return res
-
-    def addCallback(self, callback):
-        callback = zc.async.interfaces.IPartial(callback)
-        self.callbacks.put(callback)
-        callback.__parent__ = self
-        if self.state == zc.async.interfaces.COMPLETED:
-            callback(self.result) # this commits transactions!
-        else:
-            self._p_changed = True # to try and fire conflict errors if
-            # our reading of self.state has changed beneath us
-        return callback
-
-    def __call__(self, *args, **kwargs):
-        if self.state != zc.async.interfaces.PENDING:
-            raise zc.async.interfaces.BadStateError(
-                'can only call a partial in PENDING state')
-        tm = transaction.interfaces.ITransactionManager(self)
-        self._state = zc.async.interfaces.ACTIVE
-        tm.commit()
-        effective_args = list(args)
-        effective_args[0:0] = self.args
-        effective_kwargs = dict(self.kwargs)
-        effective_kwargs.update(kwargs)
-        return self._call_with_retry(
-            lambda: self.callable(*effective_args, **effective_kwargs))
-
-    def _call_with_retry(self, call):
-        ct = 0
-        tm = transaction.interfaces.ITransactionManager(self)
-        res = None
-        while 1:
-            try:
-                res = call()
-                if zc.async.interfaces.IPartial.providedBy(res):
-                    res.addCallback(self._callback)
-                elif isinstance(res, twisted.internet.defer.Deferred):
-                    res.addBoth(zc.twist.Partial(self._callback))
-                else:
-                    if isinstance(res, twisted.python.failure.Failure):
-                        res = zc.twist.sanitize(res)
-                    self._result = res
-                    self._state = zc.async.interfaces.CALLBACKS
-                tm.commit()
-            except ZODB.POSException.TransactionError:
-                tm.abort()
-                ct += 1
-                if ct >= 5:
-                    res = self._result = zc.twist.sanitize(
-                        twisted.python.failure.Failure())
-                    self._state = zc.async.interfaces.CALLBACKS
-                    tm.commit()
-                    self.resumeCallbacks()
-                else:
-                    continue
-            except zc.twist.EXPLOSIVE_ERRORS:
-                tm.abort()
-                raise
-            except:
-                tm.abort()
-                res = self._result = zc.twist.sanitize(
-                    twisted.python.failure.Failure())
-                self._state = zc.async.interfaces.CALLBACKS
-                tm.commit()
-                self.resumeCallbacks()
-            else:
-                if self.state == zc.async.interfaces.CALLBACKS:
-                    self.resumeCallbacks()
-            return res
-
-    def _callback(self, res):
-        self._call_with_retry(lambda: res)
-
-    def fail(self, e=None):
-        if e is None:
-            e = zc.async.interfaces.AbortedError()
-        if self.state not in (zc.async.interfaces.PENDING,
-                              zc.async.interfaces.ACTIVE):
-            raise zc.async.interfaces.BadStateError(
-                'can only call fail on a partial in PENDING or ACTIVE states')
-        tm = transaction.interfaces.ITransactionManager(self)
-        self._result = zc.twist.sanitize(
-            twisted.python.failure.Failure(e))
-        self._state = zc.async.interfaces.CALLBACKS
-        tm.commit()
-        self.resumeCallbacks()
-
-    def resumeCallbacks(self):
-        if self.state != zc.async.interfaces.CALLBACKS:
-            raise zc.async.interfaces.BadStateError(
-                'can only resumeCallbacks on a partial in CALLBACKS state')
-        callbacks = list(self.callbacks)
-        tm = transaction.interfaces.ITransactionManager(self)
-        length = 0
-        while 1:
-            for p in callbacks:
-                if p.state == zc.async.interfaces.PENDING:
-                    p(self.result)
-                elif p.state == zc.async.interfaces.ACTIVE:
-                    p.fail()
-                elif p.state == zc.async.interfaces.CALLBACKS:
-                    p.resumeCallbacks()
-                # TODO: this shouldn't raise anything we want to catch, right?
-                # now, this should catch all the errors except EXPLOSIVE_ERRORS
-                # cleaning up dead partials should look something like the above.
-            tm.commit()
-            # it's possible that someone added some callbacks run until
-            # we're exhausted.
-            length += len(callbacks)
-            callbacks = list(self.callbacks)[length:]
-            if not callbacks:
-                try:
-                    self._state = zc.async.interfaces.COMPLETED
-                    tm.commit()
-                except ZODB.POSException.TransactionError:
-                    tm.abort()
-                    callbacks = list(self.callbacks)[length:]
-                else:
-                    break # and return

Deleted: zc.async/trunk/src/zc/async/partial.txt
===================================================================
--- zc.async/trunk/src/zc/async/partial.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/partial.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,912 +0,0 @@
-========
-Partials
-========
-
-What if you want to persist a reference to the method of a persistent
-object--you can't persist that normally in the ZODB, but that can be
-very useful, especially to store asynchronous calls.  What if you want
-to act on the result of an asynchronous call that may be called later? 
-The zc.async package offers an approach modelled loosely on the Twisted
-deferred code: `zc.async.partial.Partial`.  To use it, simply wrap the
-callable--a method of a persistent object or a callable persistent
-object or a global function--in the partial.  You can include ordered
-and keyword arguments to the partial, which may be persistent objects or
-simply pickleable objects.
-
-Unlike a normal partial, the result of the wrapped call goes on the
-partial's 'result' attribute, and the immediate return of the call might
-not be the end result.  It could also be a failure, indicating an
-exception; or another partial, indicating that we are waiting to be
-called back by the second partial; or a twisted deferred, indicating
-that we are waiting to be called back by a twisted Deferred (see the
-`twist` module, also in this package).  After you have the partial, you
-can then use a number of methods and attributes on the partial for
-further set up.  Let's show the most basic use first, though.
-
-Note that, even though this looks like an interactive prompt, all
-functions and classes defined in this document act as if they were
-defined within a module.  Classes and functions defined in an interactive
-prompt are normally not picklable, and the async Partial must work with
-picklable objects [#set_up]_.
-
-    >>> import zc.async.partial
-    >>> def call():
-    ...     print 'hello world'
-    ...     return 'my result'
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(call)
-    >>> import transaction
-    >>> transaction.commit()
-
-Now we have a partial [#verify]_.  We can see that the state is PENDING,
-call it, and then see that the function was called, and see the result on
-the partial.
-
-    >>> import zc.async.interfaces
-    >>> p.state == zc.async.interfaces.PENDING
-    True
-    >>> res = p()
-    hello world
-    >>> p.result
-    'my result'
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-
-The result of the partial also happens to be the end result of the call,
-but as mentioned above, the partial may return a deferred or another partial.
-
-    >>> res
-    'my result'
-
-We can also use a method of a persistent object.  Imagine we have a ZODB
-root that we can put objects in to.
-
-    >>> import persistent
-    >>> class Demo(persistent.Persistent):
-    ...     counter = 0
-    ...     def increase(self, value=1):
-    ...         self.counter += value
-    ...
-    >>> demo = root['demo'] = Demo()
-    >>> demo.counter
-    0
-    >>> p = root['p'] = zc.async.partial.Partial(demo.increase)
-    >>> transaction.commit()
-    >>> p() # result is None
-    >>> demo.counter
-    1
-
-So our two calls so far have returned direct successes.  This one returns
-a failure, because the wrapped call raises an exception.
-
-    >>> def callFailure():
-    ...     raise RuntimeError('Bad Things Happened Here')
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(callFailure)
-    >>> transaction.commit()
-    >>> res = p()
-    >>> p.result
-    <twisted.python.failure.Failure exceptions.RuntimeError>
-
-These are standard twisted Failures, except that frames in the stored
-traceback have been converted to reprs, so that we don't keep references
-around when we pass the Failures around (over ZEO, for instance)
-[#no_live_frames]_.  This doesn't stop us from getting nice tracebacks,
-though.
-
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    exceptions.RuntimeError: Bad Things Happened Here
-
-Note that all calls can return a failure explicitly, rather than raising
-an exception that the partial converts to an exception.  However, there
-is an important difference in behavior.  If a wrapped call raises an
-exception, the partial aborts the transaction; but if the wrapped call
-returns a failure, no abort occurs.  Wrapped calls that explicitly return
-failures are thus responsible for any necessary transaction aborts.  See
-the footnote for an example [#explicit_failure_example]_.
-
-Now let's return a partial.  This generally represents a result that is waiting
-on another asynchronous persistent call, which would normally be called by
-a worker.  We'll fire the second call ourselves for this demonstration.
-
-    >>> def innerCall():
-    ...     return 42
-    ...
-    >>> ip = root['ip'] = zc.async.partial.Partial(innerCall)
-    >>> def callPartial():
-    ...     return ip
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(callPartial)
-    >>> transaction.commit()
-    >>> res = p()
-    >>> res is ip
-    True
-
-While we are waiting for the result, the state is ACTIVE.
-
-    >>> p.state == zc.async.interfaces.ACTIVE
-    True
-
-When we call the inner partial, the result will be placed on the outer partial.
-
-    >>> p.result # None
-    >>> res = ip()
-    >>> p.result
-    42
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-
-This is accomplished with callbacks, discussed below in the Callbacks_ section.
-
-Now we'll return a deferred.  The story is almost identical to the
-partial story, except that, in our demonstration, we must handle
-transactions, because the deferred story uses the `twist` module in
-this package to let the Twisted reactor communicate safely with the
-ZODB: see twist.txt for details.
-
-    >>> import twisted.internet.defer
-    >>> inner_d = twisted.internet.defer.Deferred()
-    >>> def callDeferred():
-    ...     return inner_d
-    ...
-    >>> p = root['p2'] = zc.async.partial.Partial(callDeferred)
-    >>> transaction.commit()
-    >>> res = p()
-    >>> res is inner_d
-    True
-    >>> p.state == zc.async.interfaces.ACTIVE
-    True
-    >>> p.result # None
-
-After the deferred receives its result, we need to sync our connection to see
-it.
-
-    >>> inner_d.callback(42)
-    >>> p.result # still None; we need to sync our connection to see the result
-    >>> p.state == zc.async.interfaces.ACTIVE # it's completed, but need to sync
-    True
-    >>> trans = transaction.begin() # sync our connection
-    >>> p.result
-    42
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-
-As the last step in looking at the basics, let's look at passing arguments
-into the partial.  They can be persistent objects or generally picklable
-objects, and they can be ordered or keyword arguments.
-
-    >>> class PersistentDemo(persistent.Persistent):
-    ...     def __init__(self, value=0):
-    ...         self.value = value
-    ...
-    >>> root['demo2'] = PersistentDemo()
-    >>> import operator
-    >>> def argCall(ob, ob2=None, value=0, op=operator.add):
-    ...     for o in (ob, ob2):
-    ...         if o is not None:
-    ...             o.value = op(o.value, value)
-    ...
-    >>> p = root['p3'] = zc.async.partial.Partial(
-    ...     argCall, root['demo2'], value=4)
-    >>> transaction.commit()
-    >>> p()
-    >>> root['demo2'].value
-    4
-
-And, of course, this is a partial: we can specify some arguments when the
-partial is made, and some when it is called.
-
-    >>> root['demo3'] = PersistentDemo(10)
-    >>> p = root['p3'] = zc.async.partial.Partial(
-    ...     argCall, root['demo2'], value=4)
-    >>> transaction.commit()
-    >>> p(root['demo3'], op=operator.mul)
-    >>> root['demo2'].value
-    16
-    >>> root['demo3'].value
-    40
-
-This last feature makes partials possible to use for callbacks: our next
-topic.
-
-Callbacks
----------
-
-The partial object can also be used to handle return values and
-exceptions from the call.  The `addCallbacks` method enables the
-functionality.  Its signature is (success=None, failure=None).  It may
-be called multiple times, each time adding a success and/or failure
-callable that takes an end result: a value or a zc.async.Failure object,
-respectively.  Failure objects are passed to failure callables, and
-any other results are passed to success callables.
-
-The return value of the success and failure callables is
-important for chains and for determining whether a partial had any
-errors that need logging, as we'll see below.  The call to
-`addCallbacks` returns a partial, which can be used for chaining (see
-`Chaining Callbacks`_).
-
-Let's look at a simple example.
-
-    >>> def call(*args):
-    ...     res = 1
-    ...     for a in args:
-    ...         res *= a
-    ...     return res
-    ...
-    >>> def callback(res):
-    ...     return 'the result is %r' % (res,)
-    ...
-    >>> p = root['p4'] = zc.async.partial.Partial(call, 2, 3)
-    >>> p_callback = p.addCallbacks(callback)
-    >>> transaction.commit()
-    >>> res = p(4)
-    >>> p.result
-    24
-    >>> res
-    24
-    >>> p_callback.result
-    'the result is 24'
-
-We can now introduce another new concept: unhandled errors. A partial
-with a failure is considered to have an unhandled error if any leaf-node
-callback has a failure result, or if it itself has a failure result and
-has no callbacks.  This convention, if followed, can be used to
-determine whether to highlight the partial as an error in logs or other
-situations.  However, it is only a convention as far as the partial is
-concerned (other elements of the zc.async package may treat it more
-seriously).
-
-    >>> p.unhandled_error
-    False
-    >>> def error():
-    ...     raise RuntimeError('Boo!')
-    ...
-    >>> p = root['p3'] = zc.async.partial.Partial(error)
-    >>> transaction.commit()
-    >>> f = p()
-    >>> p.result
-    <twisted.python.failure.Failure exceptions.RuntimeError>
-    >>> p.unhandled_error
-    True
-    >>> def handleRuntime(f):
-    ...     f.trap(RuntimeError)
-    ...
-    >>> p_callback = p.addCallbacks(failure=handleRuntime)
-    >>> p_callback.state == zc.async.interfaces.COMPLETED
-    True
-    >>> p_callback.result # None
-    >>> p_callback.unhandled_error
-    False
-    >>> p.unhandled_error
-    False
-
-Here are some callback examples adding a success and a failure
-simultaneously.  This one causes a success...
-
-    >>> def multiply(first, second, third=None):
-    ...     res = first * second
-    ...     if third is not None:
-    ...         res *= third
-    ...     return res
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 3)
-    >>> transaction.commit()
-    >>> def success(res):
-    ...     print "success!", res
-    ...
-    >>> def failure(f):
-    ...     print "failure.", f
-    ...
-    >>> p.addCallbacks(success, failure) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p()
-    success! 15
-
-...and this one a failure.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, None)
-    >>> transaction.commit()
-    >>> p.addCallbacks(success, failure) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p() # doctest: +ELLIPSIS
-    failure. [Failure instance: Traceback: exceptions.TypeError...]
-
-you can also add multiple callbacks.
-
-    >>> def also_success(val):
-    ...     print "also a success!", val
-    ...
-    >>> def also_failure(f):
-    ...     print "also a failure.", f
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 3)
-    >>> transaction.commit()
-    >>> p.addCallbacks(success) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> p.addCallbacks(also_success) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p()
-    success! 15
-    also a success! 15
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, None)
-    >>> transaction.commit()
-    >>> p.addCallbacks(failure=failure) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> p.addCallbacks(failure=also_failure) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p() # doctest: +ELLIPSIS
-    failure. [Failure instance: Traceback: exceptions.TypeError...]
-    also a failure. [Failure instance: Traceback: exceptions.TypeError...]
-
-Chaining Callbacks
-------------------
-
-Sometimes it's desirable to have a chain of callables, so that one callable
-effects the input of another.  The returned partial from addCallables can
-be used for that purpose.  Effectively, the logic for addCallables is this:
-
-    def success_or_failure(success, failure, res):
-        if zc.async.interfaces.IFailure.providedBy(res):
-            if failure is not None:
-                res = failure(res)
-        elif success is not None:
-            res = success(res)
-        return res
-
-    class Partial(...):
-        ...
-        def addCallbacks(self, success=None, failure=None):
-            if success is None and failure is None:
-                return
-            res = Partial(success_or_failure, success, failure)
-            self.callbacks.append(res)
-            return res
-
-Here's a simple chain, then.  We multiply 5 * 3, then that result by 4, then
-print the result in the `success` function.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 3)
-    >>> transaction.commit()
-    >>> p.addCallbacks(zc.async.partial.Partial(multiply, 4)
-    ...               ).addCallbacks(success) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p()
-    success! 60
-
-A less artificial use case is to handle errors (like try...except) or do
-cleanup (like try...finally).  Here's an example of handling errors.
-
-    >>> def handle_failure(f):
-    ...     return 0
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, None)
-    >>> transaction.commit()
-    >>> p.addCallbacks(
-    ...     failure=handle_failure).addCallbacks(success) # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-    >>> res = p()
-    success! 0
-
-If you recall our discussion of unhandled errors above, then you know
-this means that even though the top partial has a failure, unhandled_error
-is False.
-
-    >>> isinstance(p.result, twisted.python.failure.Failure)
-    True
-    >>> p.unhandled_error
-    False
-
-Callbacks on Completed Partial
-------------------------------
-
-When you add a callback to a partial that has been completed, it is performed
-immediately.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> transaction.commit()
-    >>> res = p()
-    >>> p.result
-    10
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-    >>> p_callback = p.addCallbacks(zc.async.partial.Partial(multiply, 3))
-    >>> p_callback.result
-    30
-    >>> p.state == zc.async.interfaces.COMPLETED
-    True
-
-Chaining Partials
------------------
-
-It's also possible to achieve a somewhat similar pattern by using a
-partial as a success or failure callable, and then add callbacks to the
-second partial.  This differs from the other approach in that you are only
-adding callbacks to one side, success or failure, not the effective
-combined result.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 3)
-    >>> transaction.commit()
-    >>> p_callback = p.addCallbacks(success)
-    >>> p2 = zc.async.partial.Partial(multiply, 4)
-    >>> p_callback_2 = p.addCallbacks(p2)
-    >>> p_callback_3 = p2.addCallbacks(also_success)
-    >>> res = p()
-    success! 15
-    also a success! 60
-
-This can be used to handle failures, to some degree.
-
-    >>> def handle_failure(f):
-    ...     return 0
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, None)
-    >>> transaction.commit()
-    >>> p_callback = p.addCallbacks(failure=failure)
-    >>> p2 = zc.async.partial.Partial(handle_failure)
-    >>> p_callback_2 = p.addCallbacks(failure=p2)
-    >>> p_callback_3 = p2.addCallbacks(success)
-    >>> res = p() # doctest: +ELLIPSIS
-    failure. [Failure instance: Traceback: exceptions.TypeError...]
-    success! 0
-
-Failing
--------
-
-Speaking again of failures, it's worth discussing two other aspects of
-failing.  One is that partials offer an explicit `fail` method to abort a
-call.  It can be called when the partial is in the PENDING or ACTIVE
-state.  The primary use cases for this method are to cancel a partial that
-is overdue to start, and to cancel a partial that was in progress by a
-worker when the worker died (more on that below).
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> transaction.commit()
-    >>> p.fail()
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.AbortedError:
-
-`fail` calls all failure callbacks with the failure.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> p_callback = p.addCallbacks(failure=failure)
-    >>> transaction.commit()
-    >>> res = p.fail() # doctest: +ELLIPSIS
-    failure. [Failure instance: Traceback...zc.async.interfaces.AbortedError...]
-
-As seen above, it fails with zc.async.interfaces.AbortedError by default.
-You can also pass in a different error.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> transaction.commit()
-    >>> p.fail(RuntimeError('failed'))
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    exceptions.RuntimeError: failed
-
-As mentioned, if a worker dies when working on an active task, the active task
-should be aborted using `fail`, so the method also
-works if a partial is in the ACTIVE state.  We'll reach under the covers
-to show this.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> p._state = zc.async.interfaces.ACTIVE
-    >>> transaction.commit()
-    >>> p.fail()
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.AbortedError:
-
-It won't work for failing tasks in COMPLETED or CALLBACKS state.
-
-    >>> p.fail()
-    Traceback (most recent call last):
-    ...
-    BadStateError: can only call fail on a partial in PENDING or ACTIVE states
-    >>> p._state = zc.async.interfaces.CALLBACKS
-    >>> p.fail()
-    Traceback (most recent call last):
-    ...
-    BadStateError: can only call fail on a partial in PENDING or ACTIVE states
-
-Using `resumeCallbacks`
------------------------
-
-So `fail` is the proper way to handle an active partial that was being
-worked on by a dead worker, but how does one handle a partial that was in the
-CALLBACKS state?  The answer is to use `resumeCallbacks`.  Any callback
-partial that is still pending will be called; any callback that is active
-will be failed; any callback that is in the middle of calling its own
-callbacks will have its `resumeCallbacks` called; and any callback that is
-completed will be ignored.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 2)
-    >>> p._result = 10
-    >>> p._state = zc.async.interfaces.CALLBACKS
-    >>> completed_p = zc.async.partial.Partial(multiply, 3)
-    >>> callbacks_p = zc.async.partial.Partial(multiply, 4)
-    >>> callbacks_p._result = 40
-    >>> callbacks_p._state = zc.async.interfaces.CALLBACKS
-    >>> sub_callbacks_p = callbacks_p.addCallbacks(
-    ...     zc.async.partial.Partial(multiply, 2))
-    >>> active_p = zc.async.partial.Partial(multiply, 5)
-    >>> active_p._state = zc.async.interfaces.ACTIVE
-    >>> pending_p = zc.async.partial.Partial(multiply, 6)
-    >>> for _p in completed_p, callbacks_p, active_p, pending_p:
-    ...     p.callbacks.put(_p)
-    ...
-    >>> transaction.commit()
-    >>> res = completed_p(10)
-    >>> p.resumeCallbacks()
-    >>> sub_callbacks_p.result
-    80
-    >>> sub_callbacks_p.state == zc.async.interfaces.COMPLETED
-    True
-    >>> print active_p.result.getTraceback()
-    ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.AbortedError:
-    >>> active_p.state == zc.async.interfaces.COMPLETED
-    True
-    >>> pending_p.result
-    60
-    >>> pending_p.state == zc.async.interfaces.COMPLETED
-    True
-
-Introspecting and Mutating Arguments
-------------------------------------
-
-Partial arguments can be introspected and mutated.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 5, 3)
-    >>> transaction.commit()
-    >>> p.args
-    [5, 3]
-    >>> p.kwargs
-    {}
-    >>> p.kwargs['third'] = 2
-    >>> p()
-    30
-
-This can allow wrapped callables to have a reference to the partial
-itself.
-
-    >>> def show(v):
-    ...     print v
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial(show)
-    >>> transaction.commit()
-    >>> p.args.append(p)
-    >>> res = p() # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-
-A class method on Partial, `bind`, can simplify this.  It puts the partial as
-the first argument to the callable, as if the callable were bound as a method
-on the partial.
-
-    >>> p = root['p'] = zc.async.partial.Partial.bind(show)
-    >>> transaction.commit()
-    >>> res = p() # doctest: +ELLIPSIS
-    <zc.async.partial.Partial object at ...>
-
-Result and State
-----------------
-
-Partials know about their state, and after a completed call also know
-their result, whether it is a Failure or another value.  Possible states are
-the constants in zc.async.interfaces named PENDING, ACTIVE, CALLBACKS, and
-COMPLETED.
-
-    >>> def showState(partial, *ignore):
-    ...     state = partial.state
-    ...     for nm in 'PENDING', 'ACTIVE', 'CALLBACKS', 'COMPLETED':
-    ...         val = getattr(zc.async.interfaces, nm)
-    ...         if state == val:
-    ...             print nm
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial.bind(showState)
-    >>> transaction.commit()
-    >>> p_callback = p.addCallbacks(zc.async.partial.Partial(showState, p))
-
-    >>> showState(p)
-    PENDING
-    >>> p.result # None
-    >>> res = p()
-    ACTIVE
-    CALLBACKS
-    >>> showState(p)
-    COMPLETED
-
-A partial may only be called when the state is PENDING: calling a
-partial again raises a BadStateError.
-
-    >>> p()
-    Traceback (most recent call last):
-    ...
-    BadStateError: can only call a partial in PENDING state
-
-Other similar restrictions include the following:
-
-- A partial may not call itself [#call_self]_.
-
-- Also, a partial's direct callback may not call the partial
-  [#callback_self]_.
-
-More Partial Introspection
---------------------------
-
-We've already shown that it is possible to introspect unhandled_error,
-state, result, args, and kwargs.  Two other aspects of the basic partial
-functionality are introspectable: callable and callbacks.
-
-The callable is the callable (function or method of a picklable object) that
-the partial will call.  You can change it while the partial is in a pending
-state.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 2)
-    >>> p.callable is multiply
-    True
-    >>> p.callable = root['demo'].increase
-    >>> p.callable == root['demo'].increase
-    True
-    >>> transaction.commit()
-    >>> root['demo'].counter
-    2
-    >>> res = p()
-    >>> root['demo'].counter
-    4
-
-The callbacks are a queue of the callbacks added by addCallbacks (or the
-currently experimental and underdocumented addCallback).  Currently the
-code may allow for direct mutation of the callbacks, but it is strongly
-suggested that you do not mutate them directly; in particular, add
-callbacks only through addCallbacks or addCallback.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 2, 8)
-    >>> len(p.callbacks)
-    0
-    >>> p_callback = p.addCallbacks(zc.async.partial.Partial(multiply, 5))
-    >>> len(p.callbacks)
-    1
-
-When you use addCallbacks, you actually get a callback to your callback,
-for safety reasons.  Specifically, when you use addCallbacks, the success
-and failure callbacks are actually arguments to another callback--the result
-of the `addCallbacks` call.  If a worker dies while the partial is in
-progress, active argument partials should be cleaned up, but the logic in
-`resumeCallbacks` will not clean them up automatically (by design: this
-may not be desired behavior in all cases).  Therefore we add a callback to
-the main callback that does this job.  We return the subsidiary callback
-so that error handling works more as expected (see the `unhandled_error`
-attribute).
-
-    >>> p.callbacks[0] is p_callback
-    False
-    >>> p.callbacks[0] is p_callback.__parent__
-    True
-
-`addCallback` does not have this characteristic (you are therefore
-responsible for any internal partials).
-
-    >>> p_callback2 = zc.async.partial.Partial(multiply, 9)
-    >>> p_callback2 is p.addCallback(p_callback2)
-    True
-
-To continue with our example of introspecting the partial...
-
-    >>> len(p.callbacks)
-    2
-    >>> p.callbacks[1] is p_callback2
-    True
-    >>> transaction.commit()
-    >>> res = p()
-    >>> p.result
-    16
-    >>> p_callback.result
-    80
-    >>> p_callback2.result
-    144
-    >>> len(p.callbacks)
-    2
-    >>> p.callbacks[0] is p_callback.__parent__
-    True
-    >>> p.callbacks[1] is p_callback2
-    True
-
-The __parent__ attribute should hold the immediate parent of a partial. 
-This means that a pending partial will be within a data manager's queue;
-an active partial will be within a worker's queue (which is within a
-worker, which is within a workers container, which is within a data
-manager); and a callback will be within another partial (which may be
-intermediate to the top level partial, in which case __parent__ of the
-intermediate partial is the top level).  Here's an example.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 3, 5)
-    >>> p_callback = zc.async.partial.Partial(multiply, 2)
-    >>> p_callback2 = p.addCallbacks(p_callback)
-    >>> p_callback.__parent__ is p_callback2.__parent__
-    True
-    >>> p_callback2.__parent__.__parent__ is p
-    True
-    >>> transaction.abort()
-
-=========
-Footnotes
-=========
-
-.. [#set_up] We'll actually create the state that the text needs here.
-
-    >>> from ZODB.tests.util import DB
-    >>> db = DB()
-    >>> conn = db.open()
-    >>> root = conn.root()
-
-    You must have two adapter registrations: IConnection to
-    ITransactionManager, and IPersistent to IConnection.  We will also
-    register IPersistent to ITransactionManager because the adapter is
-    designed for it.
-
-    >>> from zc.twist import transactionManager, connection
-    >>> import zope.component
-    >>> zope.component.provideAdapter(transactionManager)
-    >>> zope.component.provideAdapter(connection)
-    >>> import ZODB.interfaces
-    >>> zope.component.provideAdapter(
-    ...     transactionManager, adapts=(ZODB.interfaces.IConnection,))
-
-    The partial class can be registered as an adapter for
-    functions and methods.  It needs to be for expected simple usage of
-    addCallbacks.
-
-    >>> import zope.component
-    >>> import types
-    >>> import zc.async.interfaces
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.FunctionType,),
-    ...     provides=zc.async.interfaces.IDataManagerPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.MethodType,),
-    ...     provides=zc.async.interfaces.IDataManagerPartial)
-
-.. [#verify] Verify interface
-
-    >>> from zope.interface.verify import verifyObject
-    >>> verifyObject(zc.async.interfaces.IPartial, p)
-    True
-    
-    Note that state and result are readonly.
-    
-    >>> p.state = 1
-    Traceback (most recent call last):
-    ...
-    AttributeError: can't set attribute
-    >>> p.result = 1
-    Traceback (most recent call last):
-    ...
-    AttributeError: can't set attribute
-
-.. [#no_live_frames] Failures have two particularly dangerous bits: the
-    traceback and the stack.  We use the __getstate__ code on Failures
-    to clean them up.  This makes the traceback (`tb`) None...
-    
-    >>> p.result.tb # None
-    
-    ...and it makes all of the values in the stack--the locals and
-    globals-- into strings.  The stack is a list of lists, in which each
-    internal list represents a frame, and contains five elements: the
-    code name (`f_code.co_name`), the code file (`f_code.co_filename`),
-    the line number (`f_lineno`), an items list of the locals, and an
-    items list for the globals.  All of the values in the items list
-    would normally be objects, but are now strings.
-    
-    >>> for (codename, filename, lineno, local_i, global_i) in p.result.stack:
-    ...     for k, v in local_i:
-    ...         assert isinstance(v, basestring), 'bad local %s' % (v,)
-    ...     for k, v in global_i:
-    ...         assert isinstance(v, basestring), 'bad global %s' % (v,)
-    ...
-    
-    Here's a reasonable question.  The Twisted Failure code has a
-    __getstate__ that cleans up the failure, and that's even what we are
-    using to sanitize the failure.  If the failure is attached to a
-    partial and stored in the ZODB, it is going to be cleaned up anyway.
-    Why explicitly clean up the failure even before it is pickled?
-
-    The answer might be classified as paranoia.  Just in case the failure
-    is kept around in memory longer--by being put on a deferred, or somehow
-    otherwise passed around--we want to eliminate any references to objects
-    in the connection as soon as possible.
-
-    Unfortunately, the __getstate__ code in the Twisted Failure can cause
-    interaction problems for code that has a __repr__ with side effects--
-    xmlrpclib, for instance.  The `twist` module has a monkeypatch for
-    that particular problem, thanks to Florent Guillaume at Nuxeo, but
-    others may be discovered.
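-
-    To illustrate the shape of the problem, here is a sketch (these names
-    are made up for the example; they are not part of this package or of
-    xmlrpclib):
-
-        class Chatty(object):
-            def __repr__(self):
-                talk_to_server()  # a side effect, as in xmlrpclib proxies
-                return '<Chatty>'
-
-        def work():
-            proxy = Chatty()  # a local in the failing frame
-            raise RuntimeError
-
-    A Failure raised out of `work` captures the frame's locals, and
-    sanitizing it repr()s each of them--so `talk_to_server` fires during
-    what looks like innocuous cleanup.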
-
-.. [#explicit_failure_example] As the main text describes, if a call raises
-    an exception, the partial will abort the transaction; but if it
-    returns a failure explicitly, the call is responsible for making any
-    desired changes to the transaction (such as aborting) before the
-    partial calls commit.  Compare.  Here is a call that raises an
-    exception, and rolls back changes.
-    
-    (Note that we are passing arguments to the partial, a topic that has
-    not yet been discussed in the text when this footnote is given: read
-    on a bit in the main text to see the details, if it seems surprising
-    or confusing.)
-
-    >>> def callAndRaise(ob):
-    ...     ob.increase()
-    ...     print ob.counter
-    ...     raise RuntimeError
-    ...
-    >>> p = root['raise_exception_example'] = zc.async.partial.Partial(
-    ...     callAndRaise, root['demo'])
-    >>> transaction.commit()
-    >>> root['demo'].counter
-    1
-    >>> res = p() # this shows the result of the print in `callAndRaise` above.
-    2
-    >>> root['demo'].counter # it was rolled back
-    1
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    exceptions.RuntimeError:
-
-    Here is a call that returns a failure, and does not abort, even though
-    the partial result looks very similar.
-
-    >>> import twisted.python.failure
-    >>> def returnExplicitFailure(ob):
-    ...     ob.increase()
-    ...     try:
-    ...         raise RuntimeError
-    ...     except RuntimeError:
-    ...         # we could have just made and returned a failure without the
-    ...         # try/except, but this is intended to make crystal clear that
-    ...         # exceptions are irrelevant if you catch them and return a
-    ...         # failure
-    ...         return twisted.python.failure.Failure()
-    ...
-    >>> p = root['explicit_failure_example'] = zc.async.partial.Partial(
-    ...     returnExplicitFailure, root['demo'])
-    >>> transaction.commit()
-    >>> res = p()
-    >>> root['demo'].counter # it was not rolled back automatically
-    2
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    exceptions.RuntimeError:
-
-.. [#call_self] Here's a partial trying to call itself.
-
-    >>> def call(obj, *ignore):
-    ...     return obj()
-    ...
-    >>> p = root['p'] = zc.async.partial.Partial.bind(call)
-    >>> transaction.commit()
-    >>> res = p()
-    >>> print p.result.getTraceback() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
-    Traceback (most recent call last):
-    ...
-    zc.async.interfaces.BadStateError: can only call a partial in PENDING state
-
-.. [#callback_self] Here's a partial's callback trying to call the partial.
-
-    >>> p = root['p'] = zc.async.partial.Partial(multiply, 3, 4)
-    >>> p_callback = p.addCallbacks(
-    ...     zc.async.partial.Partial(call, p)).addCallbacks(failure=failure)
-    >>> transaction.commit()
-    >>> res = p() # doctest: +ELLIPSIS
-    failure. [Failure instance: Traceback: zc.async.interfaces.BadStateError...]
-    >>> p.result # the main partial still ran to completion
-    12

Deleted: zc.async/trunk/src/zc/async/partials_and_transactions.txt
===================================================================
--- zc.async/trunk/src/zc/async/partials_and_transactions.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/partials_and_transactions.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,294 +0,0 @@
-This is a document for maintainers and for testing.
-
-Partials manage their own transactions when they are called.  In normal
-use, this means that transactions are committed and aborted by the
-partial itself at the points marked "COMMIT" and "ABORT" in this list
-(other software components will make commits, just not the partial):
-
-- client creates a partial, puts it in a queue, and assigns callbacks to it
-  before it is run.
-- a worker claims a partial
-- a worker calls a partial
-- partial changes state to ACTIVE: COMMIT
-- partial runs the wrapped callable, stores the result on its "result"
-  attribute, changes the state to CALLBACKS, and tries to COMMIT.
-  * if there is a ZODB.POSException.TransactionError, abort and retry 5
-    times (see the sketch after this list), after which ABORT, set a
-    Failure on the result attribute, COMMIT, and skip to the `complete`_
-    step below.
-  * if there is a SystemExit, KeyboardInterrupt, or any non-TransactionError
-    ZODB.POSException.POSError (which includes all ZEO-related storage
-    errors), ABORT and raise.
-  * if there are any other exceptions, ABORT, set a Failure on the result
-    attribute, COMMIT, and skip to `complete`_ step below.
-- If the result of the wrapped callable is a partial or Twisted deferred,
-  add a callback to it that sets the result, sets the state to CALLBACKS,
-  tries to commit as described above, and then proceeds with the
-  `complete`_ step.  COMMIT and return.
-- _`complete`: for each callback (which is itself a partial), call it.
-  Each callback partial will commit as described here.  The top partial
-  catches no errors while it runs the callbacks.
-- When all callbacks have been called, set state to COMPLETED and COMMIT.
-  If there is a ZODB.POSException.TransactionError, look in the callbacks
-  to see if there is a new one.  If there is, perform it and try again;
-  otherwise, retry this forever, logging every time, because this should
-  not happen except in the case of a new additional callback.  Other than
-  that case, there should be no conflict errors, because no two workers
-  should be touching this partial.
-- If a callback is added to this completed partial, perform the callback
-  and COMMIT.  If anything fails, including a ConflictError, just raise it.
-  Someone else should abort as necessary.
-- If a callback is added to a partial in any other state, set the partial's
-  _p_changed to True and commit, so that a concurrent state change raises a
-  ConflictError; then check the state again, and retry if the partial's
-  state changed while we were checking it.
-
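-Here is a minimal sketch of the retrying COMMIT described above.  The
-names and structure are illustrative only, not code from this package:
-
-    import transaction
-    import ZODB.POSException
-
-    def commit_with_retries(retries=5):
-        for attempt in range(retries):
-            try:
-                transaction.commit()
-                return True
-            except ZODB.POSException.TransactionError:
-                transaction.abort()  # start over from a fresh state
-        return False  # caller ABORTs, sets a Failure result, and COMMITs
-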
-Note the following:
-- if a partial's wrapped callable returns a failure, that means that it
-  is taking responsibility for any necessary abort: the partial will still
-  attempt to commit.
-- the state never changes out of COMPLETED even when a new callback is
-  added.
-- __call__ *can* raise a ConflictError; the only known way is to have two
-  workers start the same partial, which should not be possible in normal
-  zc.async usage.
-- addCallbacks may raise a ConflictError: this would happen, for instance,
-  when state is COMPLETED so callbacks are performed immediately.
-
-What could go wrong?  In this list "T1" stands for one hypothetical
-thread, and "T2" stands for another hypothetical thread, often
-overlapping in time with T1.
-
-- T1 goes to CALLBACKS state and begins evaluating callbacks.  T2 adds another
-  callback [#set_up]_.  We need to be careful that the callback is executed.
-
-    >>> import threading
-    >>> _thread_lock = threading.Lock()
-    >>> _main_lock = threading.Lock()
-    >>> called = 0
-    >>> def safe_release(lock):
-    ...     while not lock.locked():
-    ...         pass
-    ...     lock.release()
-    ...
-    >>> def locked_call(res=None):
-    ...     global called
-    ...     safe_release(_main_lock)
-    ...     _thread_lock.acquire()
-    ...     called += 1
-    ...
-    >>> def call_from_thread(p):
-    ...     id = p._p_oid
-    ...     def call():
-    ...         conn = db.open()
-    ...         p = conn.get(id)
-    ...         p()
-    ...     return call
-    ...
-    >>> _thread_lock.acquire()
-    True
-    >>> _main_lock.acquire()
-    True
-    >>> import zc.async.partial
-    >>> root['p'] = p = zc.async.partial.Partial(locked_call)
-    >>> p2 = p.addCallbacks(locked_call)
-    >>> import transaction
-    >>> transaction.commit()
-    >>> t = threading.Thread(target=call_from_thread(p))
-    >>> t.start()
-    >>> _main_lock.acquire()
-    True
-    >>> called
-    0
-    >>> trans = transaction.begin()
-    >>> p.state == zc.async.interfaces.ACTIVE
-    True
-    >>> safe_release(_thread_lock)
-    >>> _main_lock.acquire()
-    True
-    >>> called # the main call
-    1
-    >>> trans = transaction.begin()
-    >>> p.state == zc.async.interfaces.CALLBACKS
-    True
-    >>> p2 = p.addCallbacks(locked_call)
-    >>> transaction.commit()
-    >>> safe_release(_thread_lock)
-    >>> _main_lock.acquire()
-    True
-    >>> called # call back number one
-    2
-    >>> safe_release(_thread_lock)
-    >>> safe_release(_thread_lock)
-    >>> while t.isAlive():
-    ...     pass
-    ...
-    >>> called # call back number two
-    ...        # (added while first callback was in progress)
-    3
-    >>> _main_lock.release()
-
-- T1 goes to CALLBACKS state.  In the split second between checking for
-  any remaining callbacks and changing state to COMPLETED, T2 adds a
-  callback and commits.  T1 commits.  T2 thinks that callbacks are still
-  being processed, so does not process the callback, but meanwhile the
-  state is being switched to COMPLETED, and the new callback is never
-  made.  For this, we could turn off MVCC, but we don't want to do that
-  if we can help it, for efficiency reasons.  A better solution is to set
-  _p_changed in T2 on the partial, and commit; if there's a conflict
-  error, re-get the state because its change may have caused the
-  conflict.
-
-    >>> import sys
-    >>> class LockedSetter(object):
-    ...     def __init__(self, name, condition, initial=None):
-    ...         self.name = name
-    ...         self.condition = condition
-    ...         self.value = initial
-    ...     def __get__(self, obj, typ=None):
-    ...         if obj is None:
-    ...             return self
-    ...         return getattr(obj, '_z_locked_' + self.name, self.value)
-    ...     def __set__(self, obj, value):
-    ...         if self.condition(obj, value):
-    ...             safe_release(_main_lock)
-    ...             _thread_lock.acquire()
-    ...         setattr(obj, '_z_locked_' + self.name, value)
-    ...
-    >>> import zc.async.partial
-    >>> class Partial(zc.async.partial.Partial):
-    ...     _state = LockedSetter(
-    ...         '_state',
-    ...         lambda o, v: v == zc.async.interfaces.COMPLETED,
-    ...         zc.async.interfaces.PENDING)
-    ...
-    >>> called = 0
-    >>> def call(res=None):
-    ...     global called
-    ...     called += 1
-    ...
-    >>> root['p2'] = p = Partial(call)
-    >>> transaction.commit()
-    >>> _thread_lock.acquire()
-    True
-    >>> _main_lock.acquire()
-    True
-    >>> t = threading.Thread(target=call_from_thread(p))
-    >>> t.start()
-    >>> _main_lock.acquire()
-    True
-    >>> trans = transaction.begin()
-    >>> called
-    1
-    >>> p.state == zc.async.interfaces.CALLBACKS
-    True
-    >>> p2 = p.addCallbacks(call)
-    >>> transaction.commit()
-    >>> safe_release(_thread_lock)
-    >>> _main_lock.acquire()
-    True
-    >>> trans = transaction.begin()
-    >>> called
-    2
-    >>> p.state == zc.async.interfaces.CALLBACKS
-    True
-    >>> safe_release(_thread_lock)
-    >>> safe_release(_thread_lock)
-    >>> while t.isAlive():
-    ...     pass
-    ...
-    >>> _main_lock.release()
-
-  Note, because of this, addCallbacks can raise a ConflictError: it probably
-  means that the state changed out from under it.  Just retry.
-
-- T1 is performing callbacks.  T2 begins and adds a callback.  T1 changes state
-  to COMPLETED and commits.  T2 commits.  If we don't handle it carefully,
-  the callback is never called.  So we handle it carefully.
-
-    >>> _thread_lock.acquire()
-    True
-    >>> _main_lock.acquire()
-    True
-    >>> called = 0
-    >>> root['p3'] = p = zc.async.partial.Partial(call)
-    >>> p1 = p.addCallbacks(locked_call)
-    >>> transaction.commit()
-    >>> t = threading.Thread(target=call_from_thread(p))
-    >>> t.start()
-    >>> _main_lock.acquire()
-    True
-    >>> called
-    1
-    >>> trans = transaction.begin()
-    >>> def call_and_unlock(res):
-    ...     global called
-    ...     called += 1
-    ...
-    >>> p2 = p.addCallbacks(call_and_unlock)
-    >>> safe_release(_thread_lock)
-    >>> safe_release(_thread_lock)
-    >>> while t.isAlive():
-    ...     pass
-    ...
-    >>> called # the main call
-    2
-    >>> transaction.commit() # doctest: +ELLIPSIS
-    Traceback (most recent call last):
-    ...
-    ConflictError: database conflict error (..., class zc.async.partial.Partial)
-    >>> transaction.abort()
-    >>> p2 = p.addCallbacks(call_and_unlock)
-    >>> called
-    3
-    >>> transaction.commit()
-    >>> _main_lock.release()
-
-- T1 adds a callback to COMPLETED state.  It immediately runs the callback.
-  Simultaneously, T2 adds a callback to COMPLETED state.  No problem.
-
-- two workers might claim and start the same partial.  This should
-  already be stopped by workers committing transactions after they claimed
-  them.  This is considered to be a pathological case.
-
-- Generally, if a worker is determined to be dead, and its partials are
-  handed out to other workers, but the worker is actually alive, this can
-  be a serious problem.  This is also considered to be a pathological case.
-
-=========
-Footnotes
-=========
-
-.. [#set_up] We'll actually create the state that the text needs here.
-
-    >>> from ZODB.tests.util import DB
-    >>> db = DB()
-    >>> conn = db.open()
-    >>> root = conn.root()
-
-    You must have two adapter registrations: IConnection to
-    ITransactionManager, and IPersistent to IConnection.  We will also
-    register IPersistent to ITransactionManager because the adapter is
-    designed for it.
-
-    >>> from zc.twist import transactionManager, connection
-    >>> import zope.component
-    >>> zope.component.provideAdapter(transactionManager)
-    >>> zope.component.provideAdapter(connection)
-    >>> import ZODB.interfaces
-    >>> zope.component.provideAdapter(
-    ...     transactionManager, adapts=(ZODB.interfaces.IConnection,))
-
-    We also need to adapt Function and Method to IPartial.
-
-    >>> import zc.async.partial
-    >>> import zc.async.interfaces
-    >>> import zope.component
-    >>> import types
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.FunctionType,),
-    ...     provides=zc.async.interfaces.IPartial)
-    >>> zope.component.provideAdapter(
-    ...     zc.async.partial.Partial,
-    ...     adapts=(types.MethodType,),
-    ...     provides=zc.async.interfaces.IPartial)

Copied: zc.async/trunk/src/zc/async/queue.py (from rev 85211, zc.async/branches/dev/src/zc/async/queue.py)
===================================================================
--- zc.async/trunk/src/zc/async/queue.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/queue.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,375 @@
+import datetime
+import bisect
+import pytz
+import persistent
+import persistent.interfaces
+import ZODB.interfaces
+import BTrees.OOBTree
+import BTrees.Length
+import zope.interface
+import zope.component
+import zope.event
+import zope.bforest
+import zc.queue
+import zc.dict
+
+import zc.async.interfaces
+import zc.async.utils
+
+_marker = object()
+
+# purely optional
+@zope.interface.implementer(zc.async.interfaces.IQueue)
+@zope.component.adapter(persistent.interfaces.IPersistent)
+def getDefaultQueue(obj):
+    return ZODB.interfaces.IConnection(obj).root()[zc.async.interfaces.KEY]['']
+
+
+class DispatcherAgents(zc.async.utils.Dict):
+    zope.interface.implements(zc.async.interfaces.IDispatcherAgents)
+
+    UUID = None
+    activated = None
+    
+    def __init__(self, uuid):
+        super(DispatcherAgents, self).__init__()
+        self.UUID = uuid
+        self.__class__.last_ping.initialize(self)
+    
+    zc.async.utils.createAtom('last_ping', None)
+    
+    ping_interval = datetime.timedelta(seconds=30)
+    ping_death_interval = datetime.timedelta(seconds=60)
+
+    @property
+    def dead(self):
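+        # A dispatcher is presumed dead when neither its activation time
+        # nor its last ping falls within ping_death_interval of now.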
+        last_ping = self.last_ping
+        if self.activated and (
+            self.last_ping is None or self.activated > self.last_ping):
+            last_ping = self.activated
+        elif last_ping is None:
+            return False
+        return ((last_ping + self.ping_death_interval) <
+                datetime.datetime.now(pytz.UTC))
+
+    def __setitem__(self, key, value):
+        if not zc.async.interfaces.IAgent.providedBy(value):
+            raise ValueError('value must be IAgent')
+        if len(value):
+            raise ValueError('cannot add an agent with active jobs')
+        current = self.get(key)
+        if current is not None and len(current):
+            raise ValueError('cannot remove an agent with active jobs')
+        super(DispatcherAgents, self).__setitem__(key, value)
+
+    def pop(self, key, *args):
+        current = self.get(key)
+        if current is not None and len(current):
+            raise ValueError('cannot remove an agent with active jobs')
+        return super(DispatcherAgents, self).pop(key, *args)
+
+    def activate(self):
+        if self.activated:
+            raise ValueError('Already activated')
+        self.activated = datetime.datetime.now(pytz.UTC)
+        zope.event.notify(
+            zc.async.interfaces.DispatcherActivated(self))
+
+    def deactivate(self):
+        if not self.activated:
+            raise ValueError('Not activated')
+        self.activated = None
+        queue = self.parent
+        assert zc.async.interfaces.IQueue.providedBy(queue)
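+        # Redistribute whatever this dispatcher's agents were holding:
+        # unstarted jobs go back in the queue, active jobs are failed,
+        # and jobs mid-callbacks have their callbacks resumed.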
+        for agent in self.values():
+            try:
+                job = agent.pull()
+            except IndexError:
+                pass
+            else:
+                while job is not None:
+                    status = job.status
+                    if status == zc.async.interfaces.ASSIGNED:
+                        tmp = job.assignerUUID
+                        job.assignerUUID = None
+                        job.parent = None
+                        queue.put(job)
+                        job.assignerUUID = tmp
+                    elif job.status == zc.async.interfaces.ACTIVE:
+                        queue.put(job.fail)
+                    elif job.status == zc.async.interfaces.CALLBACKS:
+                        queue.put(job.resumeCallbacks)
+                    elif job.status == zc.async.interfaces.COMPLETED:
+                        # huh, that's odd.
+                        agent.completed.add(job)
+                    try:
+                        job = agent.pull()
+                    except IndexError:
+                        job = None
+        zope.event.notify(
+            zc.async.interfaces.DispatcherDeactivated(self))
+        
+
+class Queues(zc.async.utils.Dict):
+
+    def __setitem__(self, key, value):
+        if not zc.async.interfaces.IQueue.providedBy(value):
+            raise ValueError('value must be IQueue')
+        super(Queues, self).__setitem__(key, value)
+
+
+class Dispatchers(zc.dict.Dict):
+    zope.interface.implements(zc.async.interfaces.IDispatchers)
+
+    __setitem__ = update = pop = __delitem__ = copy = None # simple hide
+
+    def register(self, uuid):
+        if uuid in self:
+            raise ValueError('UUID already registered')
+        da = DispatcherAgents(uuid)
+        da.parent = self.__parent__ # __parent__ should be queue
+        super(Dispatchers, self).__setitem__(uuid, da)
+        zope.event.notify(
+            zc.async.interfaces.DispatcherRegistered(da))
+
+    def unregister(self, uuid):
+        da = self[uuid]
+        if da.activated:
+            raise ValueError('UUID is activated.')
+        da = super(Dispatchers, self).pop(uuid)
+        da.parent = da.name = None
+        zope.event.notify(
+            zc.async.interfaces.DispatcherUnregistered(da, self.__parent__))
+        return da
+
+    def ping(self, uuid):
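+        # Record this dispatcher's liveness, then check on the next
+        # active sibling and deactivate it if it appears dead.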
+        da = self[uuid]
+        if not da.activated:
+            raise ValueError('UUID is not activated.')
+        now = datetime.datetime.now(pytz.UTC)
+        if (da.last_ping is None or
+            da.last_ping + da.ping_interval <= now):
+            da.last_ping = now
+        next = self._getNextActiveSibling(uuid)
+        if next is not None and next.dead:
+            # `next` seems to be a dead dispatcher.
+            next.deactivate()
+
+    def _getNextActiveSibling(self, uuid):
+        for da in self._data.values(min=uuid, excludemin=True):
+            if da.activated:
+                return da
+        for da in self._data.values(max=uuid, excludemax=True):
+            if da.activated:
+                return da
+
+
+class Quota(zc.async.utils.Base):
+
+    zope.interface.implements(zc.async.interfaces.IQuota)
+
+    def __init__(self, name, size):
+        self._data = zc.queue.Queue()
+        self.name = name
+        self.size = size
+
+    def clean(self):
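+        # Discard jobs that have finished their work, iterating in
+        # reverse so that pulls do not shift positions not yet visited.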
+        for i, job in enumerate(reversed(self._data)):
+            if job.status in (
+                zc.async.interfaces.CALLBACKS,
+                zc.async.interfaces.COMPLETED):
+                self._data.pull(-1-i)
+
+    @property
+    def filled(self):
+        return len(self._data) >= self.size
+
+    def add(self, item):
+        if not zc.async.interfaces.IJob.providedBy(item):
+            raise ValueError('must be IJob')
+        if self.name not in item.quota_names:
+            raise ValueError('quota name must be in quota_names')
+        # self.clean()
+        if self.filled:
+            raise ValueError('Quota is filled')
+        self._data.put(item)
+
+    for nm in ('__len__', '__iter__', '__getitem__', '__nonzero__', 'get', 
+               '__contains__'):
+        locals()[nm] = zc.async.utils.simpleWrapper(nm)
+
+
+class Quotas(zc.dict.Dict):
+
+    __setitem__ = update = pop = __delitem__ = copy = None # simple hide
+
+    def create(self, name, size=1):
+        res = Quota(name, size)
+        super(Quotas, self).__setitem__(name, res)
+        res.parent = self
+
+    def remove(self, name):
+        super(Quotas, self).pop(name)
+
+
+class Queue(zc.async.utils.Base):
+    zope.interface.implements(zc.async.interfaces.IQueue)
+
+    def __init__(self):
+        self._queue = zc.queue.CompositeQueue()
+        self._held = BTrees.OOBTree.OOBTree()
+        self.quotas = Quotas()
+        self.quotas.__parent__ = self
+        self._length = BTrees.Length.Length(0)
+        self.dispatchers = Dispatchers()
+        self.dispatchers.__parent__ = self
+
+    def put(self, item, begin_after=None, begin_by=None):
+        item = zc.async.interfaces.IJob(item)
+        if item.assignerUUID is not None:
+            raise ValueError(
+                'cannot add already-assigned job')
+        for name in item.quota_names:
+            if name not in self.quotas:
+                raise ValueError('unknown quota name', name)
+        now = datetime.datetime.now(pytz.UTC)
+        if begin_after is not None:
+            item.begin_after = begin_after
+        elif item.begin_after is None:
+            item.begin_after = now
+        if begin_by is not None:
+            item.begin_by = begin_by
+        elif item.begin_by is None:
+            item.begin_by = datetime.timedelta(hours=1) # good idea?
+        item.assignerUUID = zope.component.getUtility(
+            zc.async.interfaces.IUUID)
+        if item._p_jar is None:
+            # we need to do this if the job will be stored in another
+            # database as well during this transaction.  Also, _held storage
+            # disambiguates against the database_name and the _p_oid.
+            conn = ZODB.interfaces.IConnection(self)
+            conn.add(item)
+        if now >= item.begin_after:
+            self._queue.put(item)
+        else:
+            self._held[
+                (item.begin_after,
+                 item._p_jar.db().database_name,
+                 item._p_oid)] = item
+        item.parent = self
+        self._length.change(1)
+        return item
+
+    def _iter(self):
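+        # Merge the ready queue and the time-ordered held jobs, yielding
+        # (pop callable, index or key, job) tuples in begin_after order.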
+        queue = self._queue
+        tree = self._held
+        q = enumerate(queue)
+        t = iter(tree.items())
+        q_pop = queue.pull
+        t_pop = tree.pop
+        def get_next(i):
+            try:
+                next = i.next()
+            except StopIteration:
+                active = False
+                next = (None, None)
+            else:
+                active = True
+            return active, next
+        q_active, (q_index, q_next) = get_next(q)
+        t_active, (t_index, t_next) = get_next(t)
+        while q_active and t_active:
+            if t_next.begin_after <= q_next.begin_after:
+                yield t_pop, t_index, t_next
+                t_active, (t_index, t_next) = get_next(t)
+            else:
+                yield q_pop, q_index, q_next
+                q_active, (q_index, q_next) = get_next(q)
+        if t_active:
+            yield t_pop, t_index, t_next
+            for (t_index, t_next) in t:
+                yield t_pop, t_index, t_next
+        elif q_active:
+            yield q_pop, q_index, q_next
+            for (q_index, q_next) in q:
+                yield q_pop, q_index, q_next
+
+    def pull(self, index=0):
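+        # Normalize a negative index, then walk the merged iteration to
+        # that position and remove the job found there.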
+        length = len(self)
+        if index < 0:
+            index += length
+            if index < 0:
+                raise IndexError(index + length)
+        if index >= length:
+            raise IndexError(index)
+        for i, (pop, ix, job) in enumerate(self._iter()):
+            if i == index:
+                tmp = pop(ix)
+                assert tmp is job
+                self._length.change(-1)
+                job.assignerUUID = None
+                job.parent = None
+                return job
+        assert False, 'programmer error: the length appears to be incorrect.'
+
+    def claim(self, filter=None, default=None):
+        now = datetime.datetime.now(pytz.UTC)
+        if not self._length():
+            return default
+        uuid = None
+        for pop, ix, job in self._iter():
+            res = None
+            if job.begin_after > now:
+                break
+            quotas = []
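+            # Past its begin_by deadline: hand out a job that will fail
+            # the original, rather than the original itself.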
+            if (job.begin_after + job.begin_by) < now:
+                res = zc.async.interfaces.IJob(
+                        job.fail) # specify TimeoutError?
+                res.begin_after = now
+                res.begin_by = datetime.timedelta(hours=1)
+                res.parent = self
+                if uuid is None:
+                    uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
+                res.assignerUUID = uuid
+            else:
+                for name in job.quota_names:
+                    quota = self.quotas.get(name)
+                    if quota is not None:
+                        quota.clean()
+                        if quota.filled:
+                            break
+                        quotas.append(quota)
+                else:
+                    res = job
+            if res is not None and (filter is None or filter(res)):
+                tmp = pop(ix)
+                assert tmp is job
+                self._length.change(-1)
+                for quota in quotas:
+                    quota.add(job)
+                job.parent = None
+                return res
+        return default
+
+    def __len__(self):
+        return self._length()
+
+    def __iter__(self):
+        return (next for pop, ix, next in self._iter())
+
+    def __nonzero__(self):
+        return bool(self._length())
+
+    def __getitem__(self, index):
+        length = len(self)
+        if index < 0:
+            index += length
+            if index < 0:
+                raise IndexError(index + length)
+        if index >= length:
+            raise IndexError(index)
+        for i, (pop, ix, job) in enumerate(self._iter()):
+            if i == index:
+                return job
+        assert False, 'programmer error: the length appears to be incorrect.'

Copied: zc.async/trunk/src/zc/async/queue.txt (from rev 85211, zc.async/branches/dev/src/zc/async/queue.txt)
===================================================================
--- zc.async/trunk/src/zc/async/queue.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/queue.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,857 @@
+The queue module contains the queues that zc.async clients use to
+deposit jobs, and the collections of dispatchers and their agents.
+
+The dispatchers expect to find queues in a mapping off the root of the
+database in a key given in zc.async.interfaces.KEY, so we'll follow that
+pattern, even though it doesn't matter too much for our examples
+[#setUp]_.
+
+    >>> import zc.async.queue
+    >>> import zc.async.interfaces
+    >>> container = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
+
+Now we can add a queue.  The collection sets the ``parent`` and ``name``
+attributes.
+
+    >>> queue = container[''] = zc.async.queue.Queue()
+    >>> queue.name
+    ''
+    >>> queue.parent is container
+    True
+    >>> import transaction
+    >>> transaction.commit()
+
+[#queues_collection]_ As shown in the README.txt of this package (or see
+zc.async.adapters.defaultQueueAdapter), the queue with the name '' will
+typically be registered as an adapter to persistent objects that
+provides zc.async.interfaces.IQueue [#verify]_. 
+
+The queue doesn't have any jobs yet.
+
+    >>> len(queue)
+    0
+    >>> bool(queue)
+    False
+    >>> list(queue)
+    []
+
+It also doesn't have any registered dispatchers.
+
+    >>> len(queue.dispatchers)
+    0
+    >>> bool(queue.dispatchers)
+    False
+    >>> list(queue.dispatchers)
+    []
+
+We'll look at the queue as a collection of jobs; then we'll look at the
+``dispatchers`` collection and its agents; and then we'll look at how the
+two interact.
+
+Queues and Jobs
+===============
+
+As described in the README, we can put jobs to be performed now in the
+queue, and jobs to be performed later.  The collection can be
+introspected, and then agents can ``claim`` a job or simply remove it with
+``pull``; we'll examine the differences between these calls below.
+
+We'll start by adding a job to be performed as soon as possible.  Note
+that we'll use a testing tool that lets us control the current time
+generated by datetime.datetime.now with a `zc.async.testing.set_now`
+callable.  This code also expects that a UUID will be registered as a
+zc.async.interfaces.IUUID utility with the empty name ('').
+
+    >>> from zc.async.instanceuuid import UUID
+
+    >>> import zc.async.testing
+    >>> zc.async.testing.setUpDatetime()
+
+    >>> def mock_work():
+    ...     return 42
+    ...
+    >>> job = queue.put(mock_work)
+    >>> len(queue)
+    1
+    >>> list(queue) == [job]
+    True
+    >>> queue[0] is job
+    True
+    >>> bool(queue)
+    True
+    >>> job.parent is queue
+    True
+    >>> transaction.commit()
+    
+A job added without any special calls gets a `begin_after` attribute
+of now.
+
+    >>> import datetime
+    >>> import pytz
+    >>> now = datetime.datetime.now(pytz.UTC) 
+    >>> now
+    datetime.datetime(2006, 8, 10, 15, 44, 22, 211, tzinfo=<UTC>)
+    >>> job.begin_after == now
+    True
+
+A ``begin_by`` attribute is a duration, and defaults to one hour.  This
+means that the job must be started within an hour of the ``begin_after``
+datetime, or else the system will fail it.
+
+    >>> job.begin_by == datetime.timedelta(hours=1)
+    True
+
+Because the job's ``begin_after`` is now, it's immediately ready to be
+performed: we can ``claim`` it.  This is the API that agents call on a
+queue to get a job.
+
+    >>> job is queue.claim()
+    True
+    >>> job.parent is None
+    True
+
+Now the queue is empty.
+
+    >>> len(queue)
+    0
+    >>> list(queue)
+    []
+
+You can specify a begin_after date when you make the call.  Then the job
+isn't due immediately.
+
+    >>> import operator
+    >>> import zc.async.job
+    >>> job2 = queue.put(
+    ...     zc.async.job.Job(operator.mul, 7, 6),
+    ...     datetime.datetime(2006, 8, 10, 16, tzinfo=pytz.UTC))
+    ...
+    >>> len(queue)
+    1
+    >>> job2.begin_after
+    datetime.datetime(2006, 8, 10, 16, 0, tzinfo=<UTC>)
+    >>> queue.claim() is None
+    True
+
+When the time passes, it is available to be claimed.
+
+    >>> zc.async.testing.set_now(
+    ...     datetime.datetime(2006, 8, 10, 16, tzinfo=pytz.UTC))
+    >>> job2 is queue.claim()
+    True
+    >>> len(queue)
+    0
+
+Jobs are ordered by their begin_after dates for all operations, including
+claiming and iterating.
+
+    >>> job3 = queue.put(
+    ...     zc.async.job.Job(operator.mul, 14, 3),
+    ...     datetime.datetime(2006, 8, 10, 16, 2, tzinfo=pytz.UTC))
+    >>> job4 = queue.put(
+    ...     zc.async.job.Job(operator.mul, 21, 2),
+    ...     datetime.datetime(2006, 8, 10, 16, 1, tzinfo=pytz.UTC))
+    >>> job5 = queue.put(
+    ...     zc.async.job.Job(operator.mul, 42, 1),
+    ...     datetime.datetime(2006, 8, 10, 16, 0, tzinfo=pytz.UTC))
+
+    >>> list(queue) == [job5, job4, job3]
+    True
+    >>> queue[2] is job3
+    True
+    >>> queue[1] is job4
+    True
+    >>> queue[0] is job5
+    True
+
+    >>> job5 is queue.claim()
+    True
+    >>> len(queue)
+    2
+
+The ``pull`` method is a way to remove jobs without the connotation of
+"claiming" them.  This has several implications that we'll dig into later.
+For now, we'll just use it to pull a job, and then return it.
+
+    >>> job4 is queue.pull()
+    True
+    >>> job4 is queue.put(job4)
+    True
+    >>> list(queue) == [job4, job3]
+    True
+
+Let's add another job without an explicit due date.
+
+    >>> job6 = queue.put(
+    ...     zc.async.job.Job(operator.mod, 85, 43))
+    >>> list(queue) == [job6, job4, job3]
+    True
+
+Pre-dating (before now) is equivalent to not passing a datetime.
+
+    >>> job7 = queue.put(
+    ...     zc.async.job.Job(operator.and_, 43, 106),
+    ...     begin_after=datetime.datetime(2006, 8, 10, 15, 35, tzinfo=pytz.UTC))
+    ...
+    >>> list(queue) == [job6, job7, job4, job3]
+    True
+
+Other timezones are normalized to UTC.
+
+    >>> job8 = queue.put(
+    ...     zc.async.job.Job(operator.or_, 40, 10),
+    ...     pytz.timezone('EST').localize(
+    ...         datetime.datetime(2006, 8, 10, 11, 30)))
+    ...
+    >>> job8.begin_after
+    datetime.datetime(2006, 8, 10, 16, 30, tzinfo=<UTC>)
+
+Timezone-naive datetimes are not allowed.
+
+    >>> queue.put(mock_work, datetime.datetime(2006, 8, 10, 16, 15))
+    Traceback (most recent call last):
+    ...
+    ValueError: cannot use timezone-naive values
+
+``claim``
+---------
+
+Above we have mentioned ``claim`` and ``pull``.  The semantics of ``pull``
+are the same as zc.queue: it pulls the first item off the queue, unless you
+specify an index.
+
+    >>> first = queue[0]
+    >>> first is queue.pull(0)
+    True
+    >>> last = queue[-1]
+    >>> last is queue.pull(-1)
+    True
+    >>> first is queue.put(first)
+    True
+    >>> last is queue.put(last)
+    True
+
+``claim`` is similar in that it removes an item from the queue.  However,
+it has several different behaviors:
+
+- It will only give jobs for which the begin_after value is <= now.
+
+- If begin_after + begin_by < now, a job that makes the original job fail
+  is returned instead.
+
+- If a job has one or more ``quota_names`` and the associated quotas are
+  filled with jobs not in the CALLBACKS or COMPLETED status, then it will
+  not be returned.
+
+- It does not take an index argument.
+
+- It does take a ``filter`` argument, which takes a job and returns a boolean
+  True if the job can be accepted.
+
+- If no results are available, it returns None, or a default you pass in,
+  rather than raising IndexError.
+
+The ``claim`` method is intended to be the primary interface for agents
+interacting with the queue.
+
+Let's examine the behavior of ``claim`` with some examples.  We've already
+seen the most basic usage: it has returned the first item in the queue.
+
+Right now the queue has five jobs.  Only two are ready to be started at this
+time because of the begin_after values.
+
+    >>> list(queue) == [job7, job6, job4, job3, job8]
+    True
+    >>> [j for j in queue
+    ...  if j.begin_after <= datetime.datetime.now(pytz.UTC)] == [job7, job6]
+    True
+    >>> queue.claim() is job7
+    True
+    >>> queue.claim() is job6
+    True
+    >>> print queue.claim()
+    None
+
+Now let's set the time to the begin_after of the last job, but then use a
+filter that only accepts jobs that do the ``operator.or_`` job (job8).
+
+    >>> zc.async.testing.set_now(queue[-1].begin_after)
+    >>> [j for j in queue
+    ...  if j.begin_after <= datetime.datetime.now(pytz.UTC)] == [
+    ...  job4, job3, job8]
+    True
+    >>> def only_or(job):
+    ...     return job.callable is operator.or_
+    ...
+    >>> queue.claim(only_or) is job8
+    True
+    >>> print queue.claim(only_or)
+    None
+
+These filters, as used by agents, allow control over what jobs happen for a
+given dispatcher, which typically equates to a given process.
+
+The quotas allow control over what jobs are happening globally for a
+given queue.  They are limits, not goals: if you create a quota that can
+have a maximum of 1 active job, this will limit jobs that identify
+themselves with this quota name to be performed only one at a time, or
+serialized.  (To be clear, unlike some uses of the word "quota," it will
+not cause a *preference* for jobs that identify themselves with this
+name.)
+
+Let's use some quotas.
+
+Our current jobs are not a part of any quotas.  We'll try to add some
+quota_names.
+
+    >>> job4.quota_names
+    ()
+    >>> job3.quota_names
+    ()
+    >>> job4.quota_names = ('content catalog',)
+    Traceback (most recent call last):
+    ...
+    ValueError: ('unknown quota name', 'content catalog')
+
+The same kind of error happens if we try to put a job with unknown quota
+names in a queue.
+
+    >>> job4 is queue.pull()
+    True
+    >>> print job4.parent
+    None
+    >>> job4.status == zc.async.interfaces.NEW
+    True
+    >>> job4.quota_names = ('content catalog',)
+    >>> queue.put(job4)
+    Traceback (most recent call last):
+    ...
+    ValueError: ('unknown quota name', 'content catalog')
+
+Note that the attribute on the job is quota_names: it expects an iterable
+of strings, not a string.  The code tries to help catch type errors, at a
+trivial level, by bailing out on strings:
+
+    >>> job4.quota_names = ''
+    Traceback (most recent call last):
+    ...
+    TypeError: provide an iterable of names
+    >>> job4.quota_names
+    ('content catalog',)
+
+We need to create the quota in the queue before we can add the job.
+
+    >>> queue.quotas.create('content catalog', 1)
+    >>> quota = queue.quotas['content catalog']
+    >>> quota.name
+    'content catalog'
+    >>> quota.parent is queue.quotas
+    True
+    >>> quota.size
+    1
+    >>> list(quota)
+    []
+
+Now we can add job4.  We'll make job3 specify the same quota while it is in
+the queue.
+
+    >>> job4 is queue.put(job4)
+    True
+    >>> job3.quota_names = ('content catalog',)
+
+Now I can claim job4 and put it in a (stub) agent.  Until job4 has moved to
+the CALLBACKS or COMPLETED status, I will be unable to claim job3.
+
+    >>> import zope.interface
+    >>> import persistent.list
+    >>> import BTrees
+    >>> class Completed(persistent.Persistent):
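+    ...     # stub for an agent's completed-jobs collection: keys sort
+    ...     # from newest to oldest (see zc.async.utils.dt_to_long)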
+    ...     def __init__(self):
+    ...         self._data = BTrees.family64.IO.BTree()
+    ...     def add(self, value):
+    ...         key = zc.async.utils.dt_to_long(
+    ...             datetime.datetime.now(pytz.UTC)) + 15
+    ...         while key in self._data:
+    ...             key -= 1
+    ...         value.key = key
+    ...         self._data[key] = value
+    ...     def first(self):
+    ...         return self._data[self._data.minKey()]
+    ...     def __iter__(self):
+    ...         return iter(self._data.values())
+    ...
+    >>> class StubAgent(persistent.list.PersistentList):
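+    ...     # minimal IAgent stand-in: claims jobs from its queue until
+    ...     # it holds ``size`` of them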
+    ...     zope.interface.implements(zc.async.interfaces.IAgent)
+    ...     parent = name = None
+    ...     size = 3
+    ...     def __init__(self):
+    ...         self.completed = Completed()
+    ...         persistent.list.PersistentList.__init__(self)
+    ...     @property
+    ...     def queue(self):
+    ...         return self.parent.parent
+    ...     def claimJob(self):
+    ...         if len(self) < self.size:
+    ...             job = self.queue.claim()
+    ...             if job is not None:
+    ...                 job.parent = self
+    ...                 self.append(job)
+    ...                 return job
+    ...     def pull(self):
+    ...         return self.pop(0)
+    ...     def jobCompleted(self, job):
+    ...         self.completed.add(job)
+    ...         
+
+    >>> job4 is queue.claim()
+    True
+    >>> job4.parent = StubAgent()
+    >>> job4.status == zc.async.interfaces.ASSIGNED
+    True
+    >>> list(quota) == [job4]
+    True
+    >>> [j.quota_names for j in queue]
+    [('content catalog',)]
+    >>> print queue.claim()
+    None
+    >>> job4()
+    42
+    >>> job4.status == zc.async.interfaces.COMPLETED
+    True
+    >>> job3 is queue.claim()
+    True
+
+The final characteristic of ``claim`` to review is that jobs that have
+timed out are returned in wrappers that fail the original job.
+
+    >>> job9_from_outer_space = queue.put(mock_work)
+    >>> zc.async.testing.set_now(
+    ...     datetime.datetime.now(pytz.UTC) +
+    ...     job9_from_outer_space.begin_by + datetime.timedelta(seconds=1))
+    >>> job9 = queue.claim()
+    >>> job9 is job9_from_outer_space
+    False
+    >>> stub = root['stub'] = StubAgent() 
+    >>> job9.parent = stub
+    >>> transaction.commit()
+    >>> job9()
+    >>> job9_from_outer_space.status == zc.async.interfaces.COMPLETED
+    True
+    >>> print job9_from_outer_space.result.getTraceback()
+    Traceback (most recent call last):
+    Failure: zc.async.interfaces.AbortedError: 
+    <BLANKLINE>
+    
+
+Dispatchers
+===========
+
+When a queue is installed, dispatchers register and activate themselves.
+Dispatchers typically get their UUID from the instanceuuid module in
+this package, but we will generate our own here.
+
+First we'll register a dispatcher using the instance UUID we introduced near
+the beginning of this document.
+
+    >>> UUID in queue.dispatchers
+    False
+    >>> queue.dispatchers.register(UUID)
+    >>> UUID in queue.dispatchers
+    True
+
+The registration fired off an event.  This may be used by subscribers to
+create some agents, if desired.
+
+    >>> from zope.component import eventtesting
+    >>> import zc.async.interfaces
+    >>> evs = eventtesting.getEvents(
+    ...     zc.async.interfaces.IDispatcherRegistered)
+    >>> evs # doctest: +ELLIPSIS
+    [<zc.async.interfaces.DispatcherRegistered object at ...>]
+
+We can get the dispatcher's collection of agents (an IDispatcherAgents)
+now from the ``dispatchers`` collection.  This is the object attached to
+the event seen above.
+
+    >>> verifyObject(zc.async.interfaces.IDispatchers, queue.dispatchers)
+    True
+    >>> da = queue.dispatchers[UUID]
+    >>> verifyObject(zc.async.interfaces.IDispatcherAgents, da)
+    True
+    >>> da.UUID == UUID
+    True
+    >>> da.parent is queue
+    True
+
+    >>> evs[0].object is da
+    True
+
+[#check_dispatchers_mapping]_ The object is not activated, and has not
+been pinged.
+
+    >>> print da.activated
+    None
+    >>> print da.last_ping
+    None
+
+When the object's ``last_ping`` + ``ping_interval`` is earlier than now
+(that is, when the interval has elapsed), a new ``last_ping`` is
+recorded, as we'll see below.  If the ``last_ping`` (or ``activated``,
+if more recent) + ``ping_death_interval`` is older than now, the
+dispatcher is considered to be ``dead``.
+
+    >>> da.ping_interval
+    datetime.timedelta(0, 30)
+    >>> da.ping_death_interval
+    datetime.timedelta(0, 60)
+    >>> da.dead
+    False
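+
+To make the rule concrete, here is a rough sketch of such a liveness
+check, assuming ``last_ping`` and ``activated`` are UTC datetimes
+(illustrative only; the package computes ``dead`` itself)::
+
+    def is_dead(da, now):
+        # the most recent of activation and last ping is the last sign
+        # of life; no sign of life at all means "not yet started",
+        # which is not dead
+        last = da.last_ping
+        if da.activated is not None and (
+                last is None or da.activated > last):
+            last = da.activated
+        return last is not None and last + da.ping_death_interval < now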
+
+Now we'll activate the dispatcher.
+
+    >>> import datetime
+    >>> import pytz
+    >>> now = datetime.datetime.now(pytz.UTC)
+    >>> da.activate()
+    >>> now <= da.activated <= datetime.datetime.now(pytz.UTC) 
+    True
+
+It's still not dead. :-)
+
+    >>> da.dead
+    False
+
+This also fired an event.
+
+    >>> evs = eventtesting.getEvents(
+    ...     zc.async.interfaces.IDispatcherActivated)
+    >>> evs # doctest: +ELLIPSIS
+    [<zc.async.interfaces.DispatcherActivated object at ...>]
+    >>> evs[0].object is da
+    True
+
+Now a dispatcher should iterate over agents and look for jobs.  There are
+no agents at the moment.
+
+    >>> len(da)
+    0
+
+Agents are a pluggable part of the design.  The implementation in this
+package is a reasonable default.  For this document, we'll use a simple
+and very incomplete stub.  See other documents in this package for use
+of the default.
+
+In real usage, perhaps a subscriber to one of the events above will add
+an agent, or a user will create one manually.  We'll add our stub to the
+DispatcherAgents collection.
+
+    >>> agent = da['main'] = StubAgent()
+    >>> agent.name
+    'main'
+    >>> agent.parent is da
+    True
+
+Now, if we had a real dispatcher for our UUID, every few seconds it would poll
+its agents for any new jobs.  It would also call ``ping`` on the
+queue.dispatchers object.  Let's do an imaginary run.
+
+    >>> import zc.twist
+    >>> import twisted.python.failure
+    >>> def getJob(agent):
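+    ...     # claim one job; re-raise critical errors, record anything
+    ...     # else as a sanitized Failure on the agent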
+    ...     try:
+    ...         job = agent.claimJob()
+    ...     except zc.twist.EXPLOSIVE_ERRORS:
+    ...         raise
+    ...     except:
+    ...         agent.failure = zc.twist.sanitize(
+    ...             twisted.python.failure.Failure())
+    ...         # we'd log here too
+    ...         job = None
+    ...     return job
+    >>> jobs_to_do = []
+    >>> def doJobsStub(job):
+    ...     jobs_to_do.append(job)
+    ...
+    >>> activated = set()
+    >>> import ZODB.POSException
+    >>> def pollStub(conn):
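+    ...     # simulate one dispatcher poll: register and activate where
+    ...     # needed, claim jobs from each agent, and record a ping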
+    ...     for queue in conn.root()['zc.async'].values():
+    ...         if UUID not in queue.dispatchers:
+    ...             queue.dispatchers.register(UUID)
+    ...         da = queue.dispatchers[UUID]
+    ...         if queue._p_oid not in activated:
+    ...             if da.activated:
+    ...                 if da.dead:
+    ...                     da.deactivate()
+    ...                 else:
+    ...                     # log problem
+    ...                     print "already activated: another process?"
+    ...                     continue
+    ...             da.activate()
+    ...             activated.add(queue._p_oid)
+    ...             # be sure to remove if transaction fails
+    ...             try:
+    ...                 transaction.commit()
+    ...             except (SystemExit, KeyboardInterrupt):
+    ...                 transaction.abort()
+    ...                 raise
+    ...             except:
+    ...                 # log problem
+    ...                 print "problem..."
+    ...                 transaction.abort()
+    ...                 activated.remove(queue._p_oid)
+    ...                 continue
+    ...         for agent in da.values():
+    ...             job = getJob(agent)
+    ...             while job is not None:
+    ...                 doJobsStub(job)
+    ...                 job = getJob(agent)
+    ...         queue.dispatchers.ping(UUID)
+    ...         try:
+    ...             transaction.commit()
+    ...         except (SystemExit, KeyboardInterrupt):
+    ...             transaction.abort()
+    ...             raise
+    ...         except:
+    ...             # log problem
+    ...             print "problem..."
+    ...             transaction.abort()
+    ...             activated.remove(queue._p_oid)
+    ...             continue
+    ...
+
+Running this now will generate an "already activated" warning, because we've
+already manually activated the dispatcher.  Normally this would only happen
+when the same instance was started more than once simultaneously--a situation
+that could be produced with ``zopectl start`` followed by ``zopectl debug``,
+for instance.
+
+    >>> pollStub(conn)
+    already activated: another process?
+
+So, we'll just put the queue's oid in ``activated``.
+
+    >>> activated.add(queue._p_oid)
+
+Now, when we poll, we get a ping.
+
+    >>> before = datetime.datetime.now(pytz.UTC)
+    >>> pollStub(conn)
+    >>> before <= da.last_ping <= datetime.datetime.now(pytz.UTC)
+    True
+
+We don't have any jobs to claim yet.  Let's add one and poll again.
+We'll use a test fixture, ``time_flies``, to advance the time.
+
+    >>> job10 = queue.put(mock_work)
+
+    >>> def time_flies(seconds):
+    ...     zc.async.testing.set_now(
+    ...         datetime.datetime.now(pytz.UTC) +
+    ...         datetime.timedelta(seconds=seconds))
+    ...
+
+    >>> last_ping = da.last_ping
+    >>> time_flies(5)
+    >>> pollStub(conn)
+    >>> da.last_ping == last_ping
+    True
+
+    >>> len(jobs_to_do)
+    1
+    >>> print queue.claim()
+    None
+
+The ``last_ping`` won't change until at least another ``da.ping_interval``
+has passed since the original ping.  We've already gone through 5 seconds.
+We'll fly through 10 more, and then 15 for the rest.
+
+    >>> time_flies(10)
+    >>> pollStub(conn)
+    >>> da.last_ping == last_ping
+    True
+    >>> time_flies(15)
+    >>> pollStub(conn)
+    >>> da.last_ping > last_ping
+    True
+
+Dead Dispatchers
+----------------
+
+What happens when a dispatcher dies?  If it is the only one, that's the
+end: when it restarts it should clean out the old jobs in its agents and
+then proceed.  But what if a queue has more than one simultaneous
+dispatcher?  How do we know to clean out the dead dispatcher's jobs?
+
+The ``ping`` method not only changes the ``last_ping`` but checks the
+next sibling dispatcher, as defined by UUID, to make sure that it is not
+dead. It uses the ``dead`` attribute, introduced above, to test whether
+the sibling is alive.
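+
+Assuming the ``dispatchers`` collection is ordered by UUID, the behavior is
+roughly this sketch (not the actual implementation)::
+
+    def ping(dispatchers, uuid):
+        dispatchers[uuid].last_ping = datetime.datetime.now(pytz.UTC)
+        uuids = sorted(dispatchers.keys())
+        # check the next sibling by UUID, wrapping around at the end
+        next_uuid = uuids[(uuids.index(uuid) + 1) % len(uuids)]
+        sibling = dispatchers[next_uuid]
+        if next_uuid != uuid and sibling.activated and sibling.dead:
+            sibling.deactivate()  # clean up the dead sibling's jobs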
+
+We'll introduce another virtual dispatcher to show this behavior.
+
+    >>> import uuid
+    >>> alt_UUID = uuid.uuid1()
+    >>> queue.dispatchers.register(alt_UUID)
+    >>> alt_da = queue.dispatchers[alt_UUID]
+    >>> alt_da.activate()
+    >>> zc.async.testing.set_now(
+    ...     datetime.datetime.now(pytz.UTC) +
+    ...     alt_da.ping_death_interval + datetime.timedelta(seconds=1))
+    >>> alt_da.dead
+    True
+    >>> bool(alt_da.activated)
+    True
+    >>> pollStub(conn)
+    >>> bool(alt_da.activated)
+    False
+
+Let's do that again, with an agent in the new dispatcher and some jobs in the
+agent.  Assigned jobs will be reassigned; in-progress jobs will have a
+new task that fails them; callback jobs will resume their callbacks; and
+completed jobs will be moved to the completed collection.
+
+    >>> alt_agent = alt_da['main'] = StubAgent()
+    >>> alt_agent.size = 4
+    >>> alt_da.activate()
+    >>> jobA = queue.put(mock_work)
+    >>> jobB = queue.put(mock_work)
+    >>> jobC = queue.put(mock_work)
+    >>> jobD = queue.put(mock_work)
+    >>> jobE = queue.put(mock_work)
+    >>> jobA is alt_agent.claimJob()
+    True
+    >>> jobB is alt_agent.claimJob()
+    True
+    >>> jobC is alt_agent.claimJob()
+    True
+    >>> jobD is alt_agent.claimJob()
+    True
+    >>> print alt_agent.claimJob()
+    None
+    >>> len(alt_agent)
+    4
+    >>> len(queue)
+    1
+    >>> jobB._status = zc.async.interfaces.ACTIVE
+    >>> jobC._status = zc.async.interfaces.CALLBACKS
+    >>> jobD()
+    42
+    >>> jobD.status == zc.async.interfaces.COMPLETED
+    True
+    >>> queue.dispatchers.ping(alt_UUID)
+    >>> zc.async.testing.set_now(
+    ...     datetime.datetime.now(pytz.UTC) +
+    ...     alt_da.ping_death_interval + datetime.timedelta(seconds=1))
+    >>> alt_da.dead
+    True
+    >>> queue.dispatchers.ping(UUID)
+    >>> bool(alt_da.activated)
+    False
+    >>> len(alt_agent)
+    0
+    >>> len(queue)
+    4
+    >>> queue[1] is jobA
+    True
+    >>> queue[2].callable == jobB.fail
+    True
+    >>> queue[3].callable == jobC.resumeCallbacks
+    True
+    >>> alt_agent.completed.first() is jobD
+    True
+
+If you have multiple workers, it is strongly suggested that you keep the
+associated servers' clocks synchronized with a shared time server (for
+example, via NTP): the liveness checks described above compare timestamps
+written by different processes.
+
+=========
+Footnotes
+=========
+
+.. [#setUp] We'll actually create the state that the text needs here.
+
+    >>> from ZODB.tests.util import DB
+    >>> db = DB()
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+.. [#queues_collection] The queues collection is a simple mapping that only
+    allows queues to be inserted.
+
+    >>> len(container)
+    1
+    >>> list(container.keys())
+    ['']
+    >>> list(container)
+    ['']
+    >>> list(container.values()) == [queue]
+    True
+    >>> list(container.items()) == [('', queue)]
+    True
+    >>> container.get('') is queue
+    True
+    >>> container.get(2) is None
+    True
+    >>> container[''] is queue
+    True
+    >>> container['foo']
+    Traceback (most recent call last):
+    ...
+    KeyError: 'foo'
+    
+    >>> container['foo'] = None
+    Traceback (most recent call last):
+    ...
+    ValueError: value must be IQueue
+    
+    >>> del container['']
+    >>> len(container)
+    0
+    >>> list(container)
+    []
+    >>> list(container.keys())
+    []
+    >>> list(container.items())
+    []
+    >>> list(container.values())
+    []
+    >>> container.get('') is None
+    True
+    >>> queue.name is None
+    True
+    >>> queue.parent is None
+    True
+
+    >>> container[''] = queue
+
+.. [#verify] Verify queue interface.
+
+    >>> from zope.interface.verify import verifyObject
+    >>> verifyObject(zc.async.interfaces.IQueue, queue)
+    True
+
+.. [#check_dispatchers_mapping]
+
+    >>> len(queue.dispatchers)
+    1
+    >>> list(queue.dispatchers.keys()) == [UUID]
+    True
+    >>> list(queue.dispatchers) == [UUID]
+    True
+    >>> list(queue.dispatchers.values()) == [da]
+    True
+    >>> list(queue.dispatchers.items()) == [(UUID, da)]
+    True
+    >>> queue.dispatchers.get(UUID) is da
+    True
+    >>> queue.dispatchers.get(2) is None
+    True
+    >>> queue.dispatchers[UUID] is da
+    True
+    >>> queue.dispatchers[2]
+    Traceback (most recent call last):
+    ...
+    KeyError: 2

Deleted: zc.async/trunk/src/zc/async/rwproperty.py
===================================================================
--- zc.async/trunk/src/zc/async/rwproperty.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/rwproperty.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,75 +0,0 @@
-# Read & write properties
-#
-# Copyright (c) 2006 by Philipp "philiKON" von Weitershausen
-#                       philikon at philikon.de
-#
-# Freely distributable under the terms of the Zope Public License, v2.1.
-#
-# See rwproperty.txt for detailed explanations
-#
-import sys
-
-__all__ = ['getproperty', 'setproperty', 'delproperty']
-
-class rwproperty(object):
-
-    def __new__(cls, func):
-        name = func.__name__
-
-        # ugly, but common hack
-        frame = sys._getframe(1)
-        locals = frame.f_locals
-
-        if name not in locals:
-            return cls.createProperty(func)
-
-        oldprop = locals[name]
-        if isinstance(oldprop, property):
-            return cls.enhanceProperty(oldprop, func)
-
-        raise TypeError("read & write properties cannot be mixed with "
-                        "other attributes except regular property objects.")
-
-    # this might not be particularly elegant, but it's easy on the eyes
-
-    @staticmethod
-    def createProperty(func):
-        raise NotImplementedError
-
-    @staticmethod
-    def enhanceProperty(oldprop, func):
-        raise NotImplementedError
-
-class getproperty(rwproperty):
-
-    @staticmethod
-    def createProperty(func):
-        return property(func)
-
-    @staticmethod
-    def enhanceProperty(oldprop, func):
-        return property(func, oldprop.fset, oldprop.fdel)
-
-class setproperty(rwproperty):
-
-    @staticmethod
-    def createProperty(func):
-        return property(None, func)
-
-    @staticmethod
-    def enhanceProperty(oldprop, func):
-        return property(oldprop.fget, func, oldprop.fdel)
-
-class delproperty(rwproperty):
-
-    @staticmethod
-    def createProperty(func):
-        return property(None, None, func)
-
-    @staticmethod
-    def enhanceProperty(oldprop, func):
-        return property(oldprop.fget, oldprop.fset, func)
-
-if __name__ == "__main__":
-    import doctest
-    doctest.testfile('rwproperty.txt')

Deleted: zc.async/trunk/src/zc/async/rwproperty.txt
===================================================================
--- zc.async/trunk/src/zc/async/rwproperty.txt	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/rwproperty.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,140 +0,0 @@
-Read & write properties
-========================
-
-:Author:   Philipp von Weitershausen
-:Email:    philikon at philikon.de
-:License:  Zope Public License, v2.1
-
-Motivation
-----------
-
-Using method decorators and descriptors like ``property``, we can
-easily create computed attributes:
-
-  >>> class JamesBrown(object):
-  ...     @property
-  ...     def feel(self):
-  ...         return self._feel
-
-An attribute like this cannot be written, though.  You would have to
-do something like this:
-
-  >>> class JamesBrown(object):
-  ...     def _getFeel(self):
-  ...         return self._feel
-  ...     def _setFeel(self, feel):
-  ...         self._feel = feel
-  ...     feel = property(_getFeel, _setFeel)
-
-The problem with this approach is that it leaves the getter and setter
-sitting around in the class namespace.  It also lacks the compact
-spelling of a decorator solution.  To cope with that, some people like
-to write:
-
-  >>> class JamesBrown(object):
-  ...     @apply
-  ...     def feel():
-  ...         def get(self):
-  ...             return self._feel
-  ...         def set(self, feel):
-  ...             self._feel = feel
-  ...         return property(get, set)
-
-This spelling feels rather cumbersome, apart from the fact that
-``apply`` is `going to go away`_ in Python 3000.
-
-.. _going to go away: http://www.python.org/peps/pep-3000.html#id24
-
-
-Goal
-----
-
-There should be a way to declare a read & write property and still use
-the compact and easy decorator spelling.  The read & write properties
-should be as easy to use as the read-only property.  We explicitly
-don't want that immediately called function that really just helps us
-name the attribute and create a local scope for the getter and setter.
-
-
-Read & write property
----------------------
-
-Read & write properties work like regular properties.  You simply
-define a method and then apply a decorator, except that you now don't
-use ``@property`` but ``@getproperty`` to mark the getter and
-``@setproperty`` to mark the setter:
-
-  >>> from rwproperty import getproperty, setproperty
-  >>> class JamesBrown(object):
-  ...     @getproperty
-  ...     def feel(self):
-  ...         return self._feel
-  ...     @setproperty
-  ...     def feel(self, feel):
-  ...         self._feel = feel
-
-  >>> i = JamesBrown()
-  >>> i.feel
-  Traceback (most recent call last):
-  ...
-  AttributeError: 'JamesBrown' object has no attribute '_feel'
-
-  >>> i.feel = "good"
-  >>> i.feel
-  'good'
-
-The order in which getters and setters are declared doesn't matter:
-
-  >>> from rwproperty import getproperty, setproperty
-  >>> class JamesBrown(object):
-  ...     @setproperty
-  ...     def feel(self, feel):
-  ...         self._feel = feel
-  ...     @getproperty
-  ...     def feel(self):
-  ...         return self._feel
-
-  >>> i = JamesBrown()
-  >>> i.feel = "good"
-  >>> i.feel
-  'good'
-
-Of course, deleters are also possible:
-
-  >>> from rwproperty import delproperty
-  >>> class JamesBrown(object):
-  ...     @setproperty
-  ...     def feel(self, feel):
-  ...         self._feel = feel
-  ...     @getproperty
-  ...     def feel(self):
-  ...         return self._feel
-  ...     @delproperty
-  ...     def feel(self):
-  ...         del self._feel
-
-  >>> i = JamesBrown()
-  >>> i.feel = "good"
-  >>> del i.feel
-  >>> i.feel
-  Traceback (most recent call last):
-  ...
-  AttributeError: 'JamesBrown' object has no attribute '_feel'
-
-
-Edge cases
-----------
-
-There might be a case where you're using a flavour of read & write
-properties and already have a non-property attribute of the same name
-defined:
-
-  >>> class JamesBrown(object):
-  ...     feel = "good"
-  ...     @getproperty
-  ...     def feel(self):
-  ...         return "so good"
-  ...
-  Traceback (most recent call last):
-  ...
-  TypeError: read & write properties cannot be mixed with other attributes except regular property objects.

Modified: zc.async/trunk/src/zc/async/subscribers.py
===================================================================
--- zc.async/trunk/src/zc/async/subscribers.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/subscribers.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,29 +1,27 @@
-import os
+import threading
+import signal
 import transaction
-import transaction.interfaces
-import ZODB.interfaces
-import twisted.internet.reactor
+import twisted.internet.selectreactor
 import zope.component
-import zope.event
-import zope.app.appsetup.interfaces
 import zc.twist
 
-import zc.async.datamanager
 import zc.async.interfaces
-import zc.async.engine
+import zc.async.queue
+import zc.async.agent
+import zc.async.dispatcher
+import zc.async.utils
 
-NAME = 'zc.async.datamanager'
+class QueueInstaller(object):
 
-class InstallerAndNotifier(object):
-
-    def __init__(self, name=NAME,
-                 factory=lambda *args: zc.async.datamanager.DataManager(),
-                 get_folder=lambda r: r):
-        zope.component.adapter(
-            zope.app.appsetup.interfaces.IDatabaseOpenedEvent)(self)
-        self.name = name
+    def __init__(self, queues=('',),
+                 factory=lambda *args: zc.async.queue.Queue(),
+                 db_name=None):
+        # This IDatabaseOpenedEvent will be from zope.app.appsetup if that
+        # package is around
+        zope.component.adapter(zc.async.interfaces.IDatabaseOpenedEvent)(self)
+        self.db_name = db_name
         self.factory = factory
-        self.get_folder = get_folder
+        self.queues = queues
 
     def __call__(self, ev):
         db = ev.database
@@ -33,62 +31,104 @@
         try:
             try:
                 root = conn.root()
-                folder = self.get_folder(root)
-                tm.commit()
-                if self.name not in folder:
-                    folder[self.name] = self.factory(conn, folder)
-                    if folder[self.name]._p_jar is None:
-                        conn.add(folder[self.name])
-                elif not zc.async.interfaces.IDataManager.providedBy(
-                    folder[self.name]):
-                    raise RuntimeError(
-                        'IDataManager not found') # TODO better error
-                zope.event.notify(
-                    zc.async.interfaces.DataManagerAvailable(folder[self.name]))
-                tm.commit()
+                if zc.async.interfaces.KEY not in root:
+                    if self.db_name is not None:
+                        other = conn.get_connection(self.db_name)
+                        queues = other.root()[
+                            zc.async.interfaces.KEY] = zc.async.queue.Queues()
+                        other.add(queues)
+                    else:
+                        queues = zc.async.queue.Queues()
+                    root[zc.async.interfaces.KEY] = queues
+                    tm.commit()
+                    zc.async.utils.log.info('queues collection added')
+                else:
+                    queues = root[zc.async.interfaces.KEY]
+                for queue_name in self.queues:
+                    if queue_name not in queues:
+                        queues[queue_name] = self.factory(conn, queue_name)
+                        tm.commit()
+                        zc.async.utils.log.info('queue %r added', queue_name)
             except:
                 tm.abort()
                 raise
         finally:
             conn.close()
 
-basicInstallerAndNotifier = InstallerAndNotifier()
+queue_installer = QueueInstaller()
+multidb_queue_installer = QueueInstaller(db_name='async')
 
-class SeparateDBCreation(object):
-    def __init__(self, db_name='zc.async', name=NAME,
-                 factory=zc.async.datamanager.DataManager,
-                 get_folder=lambda r:r):
-        self.db_name = db_name
-        self.name = name
-        self.factory = factory
-        self.get_folder = get_folder
+class ThreadedDispatcherInstaller(object):
+    def __init__(self,
+                 poll_interval=5,
+                 reactor_factory=twisted.internet.selectreactor.SelectReactor):
+        self.poll_interval = poll_interval
+        self.reactor_factory = reactor_factory
+        # This IDatabaseOpenedEvent will be from zope.app.appsetup if that
+        # package is around
+        zope.component.adapter(zc.async.interfaces.IDatabaseOpenedEvent)(self)
 
-    def __call__(self, conn, folder):
-        conn2 = conn.get_connection(self.db_name)
-        tm = transaction.interfaces.ITransactionManager(conn)
-        root = conn2.root()
-        folder = self.get_folder(root)
-        tm.commit()
-        if self.name in folder:
-            raise ValueError('data manager already exists in separate database',
-                             self.db_name, folder, self.name)
-        dm = folder[self.name] = self.factory()
-        conn2.add(dm)
-        tm.commit()
-        return dm
+    def __call__(self, ev):
+        reactor = self.reactor_factory()
+        dispatcher = zc.async.dispatcher.Dispatcher(
+            ev.database, reactor, poll_interval=self.poll_interval)
+        def start():
+            dispatcher.activate()
+            reactor.run(installSignalHandlers=0)
+        thread = threading.Thread(target=start)
+        thread.setDaemon(True)
+        thread.start()
+    
+        # The above is really sufficient. This signal registration, below, is
+        # an optimization. The dispatcher, on its next run, will eventually
+        # figure out that it is looking at a previous incarnation of itself if
+        # these handlers don't get to clean up.
+        # We do this with signal handlers rather than atexit.register because
+        # we want to clean up before the database is closed, if possible. ZODB
+        # does not provide an appropriate hook itself as of this writing.
+        curr_sigint_handler = signal.getsignal(signal.SIGINT)
+        def sigint_handler(*args):
+            reactor.callFromThread(reactor.stop)
+            thread.join(3)
+            curr_sigint_handler(*args)
+    
+        def handler(*args):
+            reactor.callFromThread(reactor.stop)
+            raise SystemExit()
+    
+        signal.signal(signal.SIGINT, sigint_handler)
+        signal.signal(signal.SIGTERM, handler)
+        # Catch Ctrl-Break in windows
+        if getattr(signal, "SIGBREAK", None) is not None:
+            signal.signal(signal.SIGBREAK, handler)
 
-installerAndNotifier = InstallerAndNotifier(factory=SeparateDBCreation())
+threaded_dispatcher_installer = ThreadedDispatcherInstaller()
 
- at zope.component.adapter(zc.async.interfaces.IDataManagerAvailableEvent)
-def installTwistedEngine(ev):
-    engine = zc.async.engine.Engine(
-        zope.component.getUtility(
-            zc.async.interfaces.IUUID, 'instance'),
-        zc.async.datamanager.Worker)
-    dm = ev.object
-    twisted.internet.reactor.callLater(
-        0,
-        zc.twist.Partial(engine.poll, dm))
-    twisted.internet.reactor.addSystemEventTrigger(
-        'before', 'shutdown', zc.twist.Partial(
-            engine.tearDown, dm))
+class AgentInstaller(object):
+
+    def __init__(self, agent_name, chooser=None, size=3, queue_names=None):
+        zope.component.adapter(
+            zc.async.interfaces.IDispatcherActivated)(self)
+        self.queue_names = queue_names
+        self.agent_name = agent_name
+        self.chooser = chooser
+        self.size = size
+
+    def __call__(self, ev):
+        dispatcher = ev.object
+        if (self.queue_names is None or
+            dispatcher.parent.name in self.queue_names):
+            if self.agent_name not in dispatcher:
+                dispatcher[self.agent_name] = zc.async.agent.Agent(
+                    chooser=self.chooser, size=self.size)
+                zc.async.utils.log.info(
+                    'agent %r added to queue %r',
+                    self.agent_name,
+                    dispatcher.parent.name)
+            else:
+                zc.async.utils.log.info(
+                    'agent %r already in queue %r',
+                    self.agent_name,
+                    dispatcher.parent.name)
+
+agent_installer = AgentInstaller('main')
\ No newline at end of file

Copied: zc.async/trunk/src/zc/async/subscribers.txt (from rev 85211, zc.async/branches/dev/src/zc/async/subscribers.txt)
===================================================================
--- zc.async/trunk/src/zc/async/subscribers.txt	                        (rev 0)
+++ zc.async/trunk/src/zc/async/subscribers.txt	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,206 @@
+The subscribers module provides several conveniences for starting and
+configuring zc.async.  Let's assume we have a database and all of the
+necessary adapters and utilities registered [#setUp]_.
+
+The first helper we'll discuss is ``threaded_dispatcher_installer``.  It can
+be used as a subscriber to a DatabaseOpened event, as defined by
+zope.app.appsetup if you are using it, and by zc.async.interfaces if you are
+not.  It is an instance of ``ThreadedDispatcherInstaller``, which, as the
+name implies, is a class that creates handlers to install a threaded
+dispatcher.
+
+We will install a dispatcher that polls a bit faster than the default five
+seconds, so that this doctest runs more quickly.
+
+    >>> import zc.async.subscribers
+    >>> import zc.async.interfaces
+    >>> import zope.event
+    >>> import zope.component
+    >>> isinstance(zc.async.subscribers.threaded_dispatcher_installer,
+    ...            zc.async.subscribers.ThreadedDispatcherInstaller)
+    True
+    >>> zc.async.subscribers.threaded_dispatcher_installer.poll_interval
+    5
+    >>> threaded_installer = zc.async.subscribers.ThreadedDispatcherInstaller(
+    ...     poll_interval=0.5)
+    >>> zope.component.provideHandler(threaded_installer)
+    >>> zope.event.notify(zc.async.interfaces.DatabaseOpened(db))
+
+Now a dispatcher is installed and running.  (The get_poll helper is defined in
+the first footnote.)
+
+    >>> import zc.async.dispatcher
+    >>> dispatcher = zc.async.dispatcher.get()
+    >>> dispatcher.poll_interval
+    0.5
+    >>> get_poll(0)
+    {}
+
+The subscriber also installs some signal handlers to optimize shutdown.
+We'll look at them soon.  For now, let's install some queues.
+
+The subscribers module also includes helpers to install a queues collection
+and zero or more queues.  The QueueInstaller class lets you specify an
+iterable of names of queues to install (defaulting to ``('',)``); a factory
+to generate queues (defaulting to one that creates a zc.async.queue.Queue);
+and a db_name naming another database in which to place the queues
+collection, for a multi-database setup (defaulting to None, meaning the
+queues are placed in the same database).
+
+Two instances of this class are already instantiated in the module; one with
+the defaults, and one specifying an additional database.
+
+    >>> isinstance(zc.async.subscribers.queue_installer,
+    ...            zc.async.subscribers.QueueInstaller)
+    True
+    >>> zc.async.subscribers.queue_installer.queues
+    ('',)
+    >>> print zc.async.subscribers.queue_installer.db_name
+    None
+    >>> isinstance(zc.async.subscribers.multidb_queue_installer,
+    ...            zc.async.subscribers.QueueInstaller)
+    True
+    >>> zc.async.subscribers.multidb_queue_installer.queues
+    ('',)
+    >>> zc.async.subscribers.multidb_queue_installer.db_name
+    'async'
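+
+As an illustration, configuring different queue names is just a matter of
+instantiating the class yourself; this instance is hypothetical and is never
+registered as a handler.
+
+    >>> demo_installer = zc.async.subscribers.QueueInstaller(
+    ...     queues=('', 'reports'))
+    >>> demo_installer.queues
+    ('', 'reports')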
+
+Let's try the multidb variation out.  We'll need another database, with the
+proper data structures set up on both; the first footnote of this file takes
+care of that.
+
+The subscribers generated by this class expect to get the same event we fired
+above, an IDatabaseOpenedEvent. Normally only one of these events fires, since
+the database generally opens once, but for the purposes of our example we will
+fire it again in a moment.
+
+While we're at it, we'll use the other handler: ``AgentInstaller``.  This
+class generates a subscriber that installs agents in the queues it finds when
+dispatcher agent activation events fire.  You must specify an agent name to
+use; and can specify a chooser (a way to choose the tasks this agent should
+perform), a size (the number of concurrent jobs this agent should hand out),
+and specific queue names in which the agent should be installed, defaulting to
+None, or all queues.
+
+The agent_installer installs an agent named 'main' for the active dispatcher
+in all queues, with a default FIFO chooser.
+
+    >>> isinstance(zc.async.subscribers.agent_installer,
+    ...            zc.async.subscribers.AgentInstaller)
+    True
+    >>> zc.async.subscribers.agent_installer.agent_name
+    'main'
+    >>> print zc.async.subscribers.agent_installer.queue_names
+    None
+    >>> print zc.async.subscribers.agent_installer.chooser
+    None
+    >>> zc.async.subscribers.agent_installer.size
+    3
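+
+As with the queue installer, a customized instance is easy to construct;
+this one is hypothetical and is never registered as a handler.
+
+    >>> demo_agent_installer = zc.async.subscribers.AgentInstaller(
+    ...     'indexing', size=1, queue_names=('',))
+    >>> demo_agent_installer.agent_name
+    'indexing'
+    >>> demo_agent_installer.size
+    1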
+
+Now we can install the subscribers and give them a try.  As we said above,
+normally the database opened event only fires once; this is just for purposes
+of demonstration.  We unregister the previous handler so nothing gets
+confused.
+
+    >>> zope.component.getGlobalSiteManager().unregisterHandler(
+    ...     threaded_installer)
+    True
+    >>> zope.component.provideHandler(
+    ...     zc.async.subscribers.multidb_queue_installer)
+    >>> zope.component.provideHandler(
+    ...     zc.async.subscribers.agent_installer)
+    >>> zope.event.notify(zc.async.interfaces.DatabaseOpened(db))
+
+Now if we look in the database, we'll find a queues collection in another
+database, with a queue, with a dispatcher, with an agent.
+
+    >>> import pprint
+    >>> pprint.pprint(get_poll())
+    {'': {'main': {'active jobs': [],
+                   'error': None,
+                   'len': 0,
+                   'new jobs': [],
+                   'size': 3}}}
+    >>> conn = db.open()
+    >>> root = conn.root()
+    >>> root._p_jar is conn
+    True
+    >>> queues = root[zc.async.interfaces.KEY]
+    >>> root[zc.async.interfaces.KEY]._p_jar is conn
+    False
+    >>> queues.keys()
+    ['']
+    >>> queue = queues['']
+    >>> len(queue.dispatchers)
+    1
+    >>> da = queue.dispatchers.values()[0]
+    >>> list(da)
+    ['main']
+    >>> bool(da.activated)
+    True
+
+Finally, we mentioned at the start that the threaded dispatcher installer
+also installed some signal handlers.  Let's send a SIGINT (usually CTRL-C)
+and show how it deactivates the dispatcher's agents collection in the ZODB.
+
+    >>> import signal
+    >>> import os
+    >>> if getattr(os, 'kill', None) is not None: # UNIX-like, not Windows
+    ...     pid = os.getpid()
+    ...     try:
+    ...         os.kill(pid, signal.SIGINT)
+    ...     except KeyboardInterrupt:
+    ...         if dispatcher.activated:
+    ...             assert False, 'dispatcher did not deactivate'
+    ...     else:
+    ...         print "failed to send SIGINT, or something"
+    ... else:
+    ...     dispatcher.reactor.callFromThread(dispatcher.reactor.stop)
+    ...     for i in range(30):
+    ...         if not dispatcher.activated:
+    ...             break
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'dispatcher did not deactivate'
+    ...
+    >>> import transaction
+    >>> t = transaction.begin() # sync
+    >>> bool(da.activated)
+    False
+
+.. ......... ..
+.. Footnotes ..
+.. ......... ..
+
+.. [#setUp] Below we set up a database, provide the adapters and utilities
+    that the code expects, and then define some helper functions we'll use in
+    the examples.  See README_2 for a discussion of what is going on with the
+    configuration.
+
+    >>> databases = {}
+    >>> import ZODB.FileStorage
+    >>> storage = ZODB.FileStorage.FileStorage(
+    ...     'main.fs', create=True)
+    
+    >>> async_storage = ZODB.FileStorage.FileStorage(
+    ...     'async.fs', create=True)
+
+    >>> from ZODB.DB import DB 
+    >>> databases[''] = db = DB(storage)
+    >>> databases['async'] = async_db = DB(async_storage)
+    >>> async_db.databases = db.databases = databases
+    >>> db.database_name = ''
+    >>> async_db.database_name = 'async'
+
+    >>> import zc.async.configure
+    >>> zc.async.configure.base()
+
+    >>> import time
+    >>> def get_poll(count = None):
+    ...     if count is None:
+    ...         count = len(dispatcher.polls)
+    ...     for i in range(30):
+    ...         if len(dispatcher.polls) > count:
+    ...             return dispatcher.polls.first()
+    ...         time.sleep(0.1)
+    ...     else:
+    ...         assert False, 'no poll!'
+    ...

Copied: zc.async/trunk/src/zc/async/testing.py (from rev 85211, zc.async/branches/dev/src/zc/async/testing.py)
===================================================================
--- zc.async/trunk/src/zc/async/testing.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/testing.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,148 @@
+
+import threading
+import bisect
+import datetime
+
+import pytz
+
+
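+# This module supports tests by replacing ``datetime.datetime`` with a
+# subclass whose ``now`` and ``utcnow`` return a value controlled by
+# ``set_now``, so tests can move time forward deterministically.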
+_now = None
+
+old_datetime = datetime.datetime
+
+def set_now(dt):
+    global _now
+    _now = _datetime(*dt.__reduce__()[1])
+
+
+class _datetime(old_datetime):
+    @classmethod
+    def now(klass, tzinfo=None):
+        if tzinfo is None:
+            return _now.replace(tzinfo=None)
+        else:
+            return _now.astimezone(tzinfo)
+    @classmethod
+    def utcnow(klass):
+        return _now.replace(tzinfo=None)
+    def astimezone(self, tzinfo):
+        return _datetime(
+            *super(_datetime,self).astimezone(tzinfo).__reduce__()[1])
+    def replace(self, *args, **kwargs):
+        return _datetime(
+            *super(_datetime,self).replace(
+                *args, **kwargs).__reduce__()[1])
+    def __repr__(self):
+        raw = super(_datetime, self).__repr__()
+        return "datetime.datetime%s" % (
+            raw[raw.index('('):],)
+    def __reduce__(self):
+        return (argh, super(_datetime, self).__reduce__()[1])
+def argh(*args, **kwargs):
+    return _datetime(*args, **kwargs)
+
+_datetime.max = _datetime(*old_datetime.max.__reduce__()[1])
+
+def setUpDatetime():
+    datetime.datetime = _datetime
+    set_now(datetime.datetime(2006, 8, 10, 15, 44, 22, 211, pytz.UTC))
+
+def tearDownDatetime():
+    datetime.datetime = old_datetime
+
+
+class Reactor(object):
+
+    def __init__(self):
+        self.started = False
+        self.calls = []
+        self.triggers = []
+        self._lock = threading.Lock()
+        self._threads = []
+
+    # necessary reactor methods
+
+    def callLater(self, delay, callable, *args, **kw):
+        if not self.started:
+            raise ValueError('not started')
+        res = (datetime.timedelta(seconds=delay) + _now, callable, args, kw)
+        self._lock.acquire()
+        try:
+            bisect.insort(self.calls, res)
+        finally:
+            self._lock.release()
+        # normally we're supposed to return something but not needed
+
+    def callFromThread(self, callable, *args, **kw):
+        if not self.started:
+            raise ValueError('not started')
+        self._lock.acquire()
+        try:
+            bisect.insort(
+                self.calls,
+                (_now, callable, args, kw))
+        finally:
+            self._lock.release()
+
+    def addSystemEventTrigger(self, _when, _event, _callable, *args, **kwargs):
+        assert _when == 'before' and _event == 'shutdown', (
+            'unsupported trigger')
+        self.triggers.append((_when, _event, _callable, args, kwargs))
+    
+    def callInThread(self, _callable, *args, **kw):
+        # very naive should be fine...
+        thread = threading.Thread(target=_callable, args=args, kwargs=kw)
+        self._threads.append(thread)
+        thread.start()
+
+    def callWhenRunning(self, _callable, *args, **kw):
+        self._lock.acquire()
+        try:
+            bisect.insort(self.calls, (_now, _callable, args, kw))
+        finally:
+            self._lock.release()
+
+    # end reactor methods
+
+    def _get_next(self, end):
+        self._lock.acquire()
+        try:
+            if self.calls and self.calls[0][0] <= end:
+                return self.calls.pop(0)
+        finally:
+            self._lock.release()
+
+    def start(self):
+        setUpDatetime()
+        self.started = True
+
+    def stop(self):
+        for when, event, callable, args, kwargs in self.triggers:
+            callable(*args, **kwargs)
+        self.started = False
+        tearDownDatetime()
+
+    def time_flies(self, seconds):
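+        # run every call scheduled within the next ``seconds``,
+        # advancing the fake clock to each call's scheduled time as it
+        # fires, and return the number of calls made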
+        if not self.started:
+            raise ValueError('not started')
+        end = _now + datetime.timedelta(seconds=seconds)
+        ct = 0
+        next = self._get_next(end)
+        while next is not None:
+            now, callable, args, kw = next
+            set_now(now)
+            callable(*args, **kw) # normally this would get try...except
+            ct += 1
+            next = self._get_next(end)
+        set_now(end)
+        return ct
+
+    def time_passes(self):
+        if not self.started:
+            raise ValueError('not started')
+        next = self._get_next(_now)
+        if next is not None:
+            discard, callable, args, kw = next
+            callable(*args, **kw)
+            return True
+        return False

Modified: zc.async/trunk/src/zc/async/tests.py
===================================================================
--- zc.async/trunk/src/zc/async/tests.py	2008-04-10 03:12:27 UTC (rev 85211)
+++ zc.async/trunk/src/zc/async/tests.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -1,66 +1,57 @@
 import os
-import shutil
 import unittest
 
-from zope.testing import doctest, module
+from zope.testing import doctest, module, loggingsupport
 import zope.component
 import zope.component.testing
 import zope.component.eventtesting
-import zc.async.partial
-import zc.async.subscribers
+import zc.async.interfaces
+import zc.async.testing
 
-def modSetUp(test):
-    zope.component.testing.setUp(test)
-    module.setUp(test, 'zc.async.doctest_test')
-
-def modTearDown(test):
-    module.tearDown(test)
-    zope.component.testing.tearDown(test)
-
 def uuidSetUp(test):
-    test.globs['old_instance_home'] = os.environ.get("INSTANCE_HOME")
-    os.environ['INSTANCE_HOME'] = os.path.join(os.path.dirname(
-        zc.async.interfaces.__file__), '_test_tmp')
-    os.mkdir(os.environ['INSTANCE_HOME'])
-    os.mkdir(os.path.join(os.environ['INSTANCE_HOME'], 'etc'))
+    import zc.async.interfaces
+    os.environ['ZC_ASYNC_UUID'] = os.path.join(os.path.dirname(
+        zc.async.interfaces.__file__), 'uuid.txt')
+    import zc.async.instanceuuid
+    uuid = zc.async.instanceuuid.getUUID()
+    if uuid != zc.async.instanceuuid.UUID: # test run changed it...
+        zc.async.instanceuuid.UUID = uuid
 
 def uuidTearDown(test):
-    shutil.rmtree(os.environ['INSTANCE_HOME'])
-    if test.globs['old_instance_home'] is None:
-        del os.environ['INSTANCE_HOME']
-    else:
-        os.environ['INSTANCE_HOME'] = test.globs['old_instance_home']
-    del test.globs['old_instance_home']
+    os.remove(os.environ['ZC_ASYNC_UUID'])
 
-def readmeSetUp(test):
-    modSetUp(test)
+def modSetUp(test):
     uuidSetUp(test)
+    zope.component.testing.setUp(test)
+    module.setUp(test, 'zc.async.doctest_test')
     zope.component.eventtesting.setUp(test)
-    test.globs['installerAndNotifier'] = (
-        zc.async.subscribers.basicInstallerAndNotifier)
-    from zc.async import instanceuuid
-    instanceuuid.UUID = instanceuuid.getUUID()
-    zope.component.provideUtility(instanceuuid.UUID, name='instance')
+    test.globs['event_logs'] = loggingsupport.InstalledHandler(
+        'zc.async.events')
+    test.globs['trace_logs'] = loggingsupport.InstalledHandler(
+        'zc.async.trace')
 
-def altReadmeSetUp(test):
-    modSetUp(test)
-    uuidSetUp(test)
-    zope.component.eventtesting.setUp(test)
-    test.globs['installerAndNotifier'] = (
-        zc.async.subscribers.installerAndNotifier)
-    from zc.async import instanceuuid
-    instanceuuid.UUID = instanceuuid.getUUID()
-    zope.component.provideUtility(instanceuuid.UUID, name='instance')
-
-def readmeTearDown(test):
-    r = test.globs.get('faux')
-    if r:
-        for when, eventname, callable in r.triggers:
-            if eventname == 'shutdown': # test didn't run to completion
-                # let's clean up
-                callable()
+def modTearDown(test):
+    import transaction
+    transaction.abort()
+    import zc.async.dispatcher
+    zc.async.dispatcher.clear()
     uuidTearDown(test)
-    modTearDown(test)
+    zc.async.testing.tearDownDatetime()
+    module.tearDown(test)
+    zope.component.testing.tearDown(test)
+    import signal
+    signal.signal(signal.SIGINT, signal.default_int_handler)
+    if 'storage' in test.globs:
+        test.globs['db'].close()
+        test.globs['storage'].close()
+        test.globs['storage'].cleanup()
+    if 'async_storage' in test.globs:
+        test.globs['async_db'].close()
+        test.globs['async_storage'].close()
+        test.globs['async_storage'].cleanup()
+    for logs in (test.globs['event_logs'], test.globs['trace_logs']):
+        logs.clear()
+        logs.uninstall()
 
 def test_instanceuuid():
     """This module provides access to a UUID that is intended to uniquely
@@ -105,24 +96,42 @@
     would have done that...though maybe that's not unfortunate :-) )
 
     """
+
+def test_long_to_dt():
+    """The utils module provides two functions to convert a datetime to a
+    long and back again.  Later dates map to smaller values, so dates
+    are arranged from newest to oldest in a BTree.  The encoding leaves
+    an extra 4 bits at the bottom, and it can convert all possible
+    datetimes.
+    
+    >>> from zc.async.utils import long_to_dt, dt_to_long
+    >>> import datetime
+    >>> now = datetime.datetime.now()
+    >>> isinstance(dt_to_long(now), long)
+    True
+    >>> now == long_to_dt(dt_to_long(now))
+    True
+    >>> now == long_to_dt(dt_to_long(now)+15)
+    True
+    >>> datetime.datetime.max == long_to_dt(dt_to_long(datetime.datetime.max))
+    True
+    >>> CE = datetime.datetime(1,1,1)
+    >>> CE == long_to_dt(dt_to_long(CE))
+    True
+    """
 
 def test_suite():
     return unittest.TestSuite((
         doctest.DocTestSuite(setUp=uuidSetUp, tearDown=uuidTearDown),
         doctest.DocFileSuite(
-            'partial.txt',
-            'partials_and_transactions.txt',
-            'datamanager.txt',
+            'job.txt',
+            'jobs_and_transactions.txt',
+            'queue.txt',
+            'agent.txt',
+            'dispatcher.txt',
+            'subscribers.txt',
+            'README.txt',
+            'README_2.txt',
             setUp=modSetUp, tearDown=modTearDown,
             optionflags=doctest.INTERPRET_FOOTNOTES),
-        doctest.DocFileSuite(
-            'README.txt',
-            setUp=readmeSetUp, tearDown=readmeTearDown,
-            optionflags=doctest.INTERPRET_FOOTNOTES),
-        doctest.DocFileSuite(
-            'README.txt',
-            setUp=altReadmeSetUp, tearDown=readmeTearDown,
-            optionflags=doctest.INTERPRET_FOOTNOTES),
         ))
 
 

Copied: zc.async/trunk/src/zc/async/utils.py (from rev 85211, zc.async/branches/dev/src/zc/async/utils.py)
===================================================================
--- zc.async/trunk/src/zc/async/utils.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/utils.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,224 @@
+import bisect
+import datetime
+import logging
+import sys
+
+import rwproperty
+import persistent
+import zc.dict
+import pytz
+import zope.bforest.periodic
+
+
+def simpleWrapper(name):
+    # notice use of "simple" in function name!  A sure sign of trouble!
+    def wrapper(self, *args, **kwargs):
+        return getattr(self._data, name)(*args, **kwargs)
+    return wrapper
+
+log = logging.getLogger('zc.async.events')
+tracelog = logging.getLogger('zc.async.trace')
+
+class Base(persistent.Persistent):
+
+    _z_parent__ = parent = None
+
+    # we use ``parent`` for our data structures.  As a convenience, we
+    # support the ``__parent__`` attribute used by most security policies so
+    # that ``__parent__`` uses ``parent`` unless __parent__ is explicitly set.
+    @property
+    def __parent__(self):
+        if self._z_parent__ is not None:
+            return self._z_parent__
+        return self.parent
+    @rwproperty.setproperty
+    def __parent__(self, value):
+        self._z_parent__ = value
+
+
+class Atom(persistent.Persistent):
+    def __init__(self, value):
+        self.value = value
+
+    def __getstate__(self):
+        return self.value
+
+    def __setstate__(self, state):
+        self.value = state
+
+class AtomDescriptor(object):
+    def __init__(self, name, initial=None):
+        self.name = name
+        self.initial = initial
+
+    def __get__(self, obj, klass=None):
+        if obj is None:
+            return self
+        return obj.__dict__[self.name].value
+
+    def __set__(self, obj, value):
+        obj.__dict__[self.name].value = value
+
+    def initialize(self, obj):
+        obj.__dict__[self.name] = Atom(self.initial)
+
+def createAtom(name, initial):
+    sys._getframe(1).f_locals[name] = AtomDescriptor(name, initial)
+
+
+class Dict(zc.dict.Dict, Base):
+    
+    copy = None # mask
+
+    def __setitem__(self, key, value):
+        previous = self.get(key)
+        super(Dict, self).__setitem__(key, value)
+        value.name = key
+        value.parent = self
+        if previous is not None:
+            previous.parent = previous.name = None
+
+    def pop(self, key, *args):
+        try:
+            res = super(Dict, self).pop(key)
+        except KeyError:
+            if args:
+                return args[0]
+            else:
+                raise
+        res.parent = None
+        res.name = None
+        return res
+
+def dt_to_long(dt):
+    # 4 low bits, 0-15, will be discarded and can be set, if desired
+    # newer dates are smaller than older, so BTrees sort from newer to older
+    if dt.tzinfo is not None:
+        dt = dt.astimezone(pytz.UTC).replace(tzinfo=None)
+    delta = datetime.datetime.max - dt
+    return (delta.days << 41 | delta.seconds << 24 | delta.microseconds << 4)
+
+def long_to_dt(l):
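+    # invert dt_to_long: bits 4-23 hold microseconds, bits 24-40 hold
+    # seconds, and the remaining high bits hold days; the low 4 bits
+    # are discarded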
+    microseconds = (l >> 4) & (2**20-1)
+    seconds = (l >> 24) & (2**17-1)
+    days = (l >> 41)
+    return (datetime.datetime.max -
+            datetime.timedelta(days, seconds, microseconds))
+
+class Periodic(persistent.Persistent):
+    # sorts on begin_after from newest to oldest
+
+    __parent__ = None
+
+    def __init__(self, period, buckets):
+        self._data = zope.bforest.periodic.LOBForest(period, count=buckets)
+
+    def clear(self):
+        self._data.clear()
+
+    @property
+    def period(self):
+        return self._data.period
+    @rwproperty.setproperty
+    def period(self, value):
+        self._data.period = value
+
+    def add(self, item):
+        key = zc.async.utils.dt_to_long(datetime.datetime.utcnow()) + 15
+        while key in self._data:
+            key -= 1
+        self._data[key] = item
+        item.parent = self.__parent__ # the agent
+        item.key = key
+
+    def iter(self, start=None, stop=None):
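+        # a simple k-way merge across the per-bucket iterators: keep
+        # ``sources`` sorted by each iterator's current head and always
+        # yield the smallest head next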
+        sources = []
+        if start is not None:
+            start = zc.async.utils.dt_to_long(start)
+        if stop is not None:
+            stop = zc.async.utils.dt_to_long(stop)
+        for b in self._data.buckets:
+            i = iter(b.items(start, stop))
+            try:
+                n = i.next()
+            except StopIteration:
+                pass
+            else:
+                sources.append([n, i])
+        sources.sort()
+        length = len(sources)
+        while length > 1:
+            src = sources.pop(0)
+            yield src[0][1]
+            try:
+                src[0] = src[1].next()
+            except StopIteration:
+                length -= 1
+            else:
+                bisect.insort(sources, src)
+        if sources:
+            yield sources[0][0][1]
+            for k, v in sources[0][1]:
+                yield v
+
+    def __iter__(self):
+        return self._data.itervalues() # this takes more memory but the pattern
+        # is typically faster than the custom iter above (for relatively
+        # complete iterations of relatively small sets).  The custom iter
+        # has the advantage of the start and stop code.
+
+    def first(self, start=None):
+        original = start
+        if start is not None:
+            start = zc.async.utils.dt_to_long(start)
+            minKey = lambda bkt: bkt.minKey(start)
+        else:
+            minKey = lambda bkt: bkt.minKey()
+        i = iter(self._data.buckets)
+        bucket = i.next()
+        try:
+            key = minKey(bucket)
+        except ValueError:
+            key = None
+        for b in i:
+            try:
+                k = minKey(b)
+            except ValueError:
+                continue
+            if key is None or k < key:
+                bucket, key = b, k
+        if key is None:
+            raise ValueError(original)
+        return bucket[key]
+
+    def last(self, stop=None):
+        original = stop
+        if stop is not None:
+            stop = zc.async.utils.dt_to_long(stop)
+            maxKey = lambda bkt: bkt.maxKey(stop)
+        else:
+            maxKey = lambda bkt: bkt.maxKey()
+        i = iter(self._data.buckets)
+        bucket = i.next()
+        try:
+            key = maxKey(bucket)
+        except ValueError:
+            key = None
+        for b in i:
+            try:
+                k = maxKey(b)
+            except ValueError:
+                continue
+            if key is None or k > key:
+                bucket, key = b, k
+        if key is None:
+            raise ValueError(original)
+        return bucket[key]
+
+    def __nonzero__(self):
+        for b in self._data.buckets:
+            for ignore in b:
+                return True
+        return False
+
+    def __len__(self):
+        return len(self._data)

Copied: zc.async/trunk/src/zc/async/z3tests.py (from rev 85211, zc.async/branches/dev/src/zc/async/z3tests.py)
===================================================================
--- zc.async/trunk/src/zc/async/z3tests.py	                        (rev 0)
+++ zc.async/trunk/src/zc/async/z3tests.py	2008-04-10 03:21:01 UTC (rev 85212)
@@ -0,0 +1,35 @@
+import os
+import unittest
+from zope.testing import doctest, module
+import zope.component.testing
+import zc.ngi.async # to quiet the thread complaints from the testing
+# infrastructure, because there is no API way to stop the z3monitor server or
+# the zc.ngi.async thread. :-(
+
+import zc.async.instanceuuid
+import zc.async.tests
+
+def setUp(test):
+    zc.async.tests.modSetUp(test)
+    # make the uuid stable for these tests
+    f = open(os.environ["ZC_ASYNC_UUID"], 'w')
+    f.writelines(('d10f43dc-ffdf-11dc-abd4-0017f2c49bdd',))
+    f.close()
+    zc.async.instanceuuid.UUID = zc.async.instanceuuid.getUUID()
+
+def test_suite():
+    return unittest.TestSuite((
+        doctest.DocFileSuite(
+            'monitor.txt',
+            setUp=setUp, tearDown=zc.async.tests.modTearDown,
+            optionflags=doctest.INTERPRET_FOOTNOTES),
+        doctest.DocFileSuite(
+            'README_3.txt',
+            setUp=zope.component.testing.setUp,
+            tearDown=zope.component.testing.tearDown,
+            optionflags=doctest.INTERPRET_FOOTNOTES),
+        ))
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')



More information about the Checkins mailing list