[Checkins] SVN: Sandbox/wichert/tags/ Tag private release

Wichert Akkerman wichert at wiggy.net
Mon Sep 22 12:53:28 EDT 2008


Log message for revision 91361:
  Tag private release

Changed:
  A   Sandbox/wichert/tags/
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/
  U   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/setup.py
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c
  D   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt
  A   Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt

-=-
Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1 (from rev 91359, Sandbox/wichert/ZODB38-jarn)


Property changes on: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1
___________________________________________________________________
Name: svn:ignore
   + build
eggs
.installed.cfg
dist
testing.log
develop-eggs
parts
bin

Name: svn:externals
   + 

Name: svn:mergeinfo
   + 

Modified: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/setup.py
===================================================================
--- Sandbox/wichert/ZODB38-jarn/setup.py	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/setup.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -20,7 +20,7 @@
 interface, rich transaction support, and undo.
 """
 
-VERSION = "3.8.1b8"
+VERSION = "3.8.1b8.jarn.1"
 
 # The (non-obvious!) choices for the Trove Development Status line:
 # Development Status :: 5 - Production/Stable

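The version bump above distinguishes this private tag from the
upstream 3.8.1b8 release. As a hedged sketch (the distribution name
"ZODB3" and the use of pkg_resources are assumptions, not part of
this checkin), a deployment could verify at startup that the private
build is the one installed:

    # Hedged sketch: confirm the private jarn build is installed.
    # Assumes the distribution is named "ZODB3" and that setuptools'
    # pkg_resources is importable; neither is asserted by this checkin.
    import pkg_resources

    dist = pkg_resources.get_distribution("ZODB3")
    if dist.version != "3.8.1b8.jarn.1":
        raise RuntimeError("unexpected ZODB version: %s" % dist.version)
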
Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/ZODB/Connection.py	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,1296 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Database connection support
-
-$Id$"""
-
-import logging
-import sys
-import tempfile
-import threading
-import warnings
-import os
-import shutil
-from time import time
-
-from persistent import PickleCache
-
-# interfaces
-from persistent.interfaces import IPersistentDataManager
-from ZODB.interfaces import IConnection
-from ZODB.interfaces import IBlobStorage
-from ZODB.blob import Blob, rename_or_copy_blob
-from transaction.interfaces import ISavepointDataManager
-from transaction.interfaces import IDataManagerSavepoint
-from transaction.interfaces import ISynchronizer
-from zope.interface import implements
-
-import transaction
-
-from ZODB.blob import SAVEPOINT_SUFFIX
-from ZODB.ConflictResolution import ResolvedSerial
-from ZODB.ExportImport import ExportImport
-from ZODB import POSException
-from ZODB.POSException import InvalidObjectReference, ConnectionStateError
-from ZODB.POSException import ConflictError, ReadConflictError
-from ZODB.POSException import Unsupported
-from ZODB.POSException import POSKeyError
-from ZODB.serialize import ObjectWriter, ObjectReader, myhasattr
-from ZODB.utils import p64, u64, z64, oid_repr, positive_id
-from ZODB import utils
-
-global_reset_counter = 0
-
-def resetCaches():
-    """Causes all connection caches to be reset as connections are reopened.
-
-    Zope's refresh feature uses this.  When you reload Python modules,
-    instances of classes continue to use the old class definitions.
-    To use the new code immediately, the refresh feature asks ZODB to
-    clear caches by calling resetCaches().  When the instances are
-    loaded by subsequent connections, they will use the new class
-    definitions.
-    """
-    global global_reset_counter
-    global_reset_counter += 1
-
-class Connection(ExportImport, object):
-    """Connection to ZODB for loading and storing objects."""
-
-    implements(IConnection,
-               ISavepointDataManager,
-               IPersistentDataManager,
-               ISynchronizer)
-
-
-    _code_timestamp = 0
-
-    ##########################################################################
-    # Connection methods, ZODB.IConnection
-
-    def __init__(self, db, version='', cache_size=400):
-        """Create a new Connection."""
-
-        self._log = logging.getLogger('ZODB.Connection')
-        self._debug_info = ()
-
-        self._db = db
-        # Multi-database support
-        self.connections = {self._db.database_name: self}
-
-        self._version = version
-        self._normal_storage = self._storage = db._storage
-        self.new_oid = db._storage.new_oid
-        self._savepoint_storage = None
-
-        # Do we need to join a txn manager?
-        self._needs_to_join = True
-        self.transaction_manager = None
-        self._opened = None # time.time() when DB.open() opened us
-
-        self._reset_counter = global_reset_counter
-        self._load_count = 0   # Number of objects unghosted
-        self._store_count = 0  # Number of objects stored
-
-        # Cache which can ghostify (forget the state of) objects not
-        # recently used. Its API is roughly that of a dict, with
-        # additional gc-related and invalidation-related methods.
-        self._cache = PickleCache(self, cache_size)
-
-        # The pre-cache is used by get to avoid infinite loops when
-        # objects immediately load their state when they get their
-        # persistent data set.
-        self._pre_cache = {}
-        
-        if version:
-            # Caches for versions end up empty if the version
-            # is not used for a while. Non-version caches
-            # keep their content indefinitely.
-            # Unclear:  Why do we want version caches to behave this way?
-            self._cache.cache_drain_resistance = 100
-
-        # List of all objects (not oids) registered as modified by the
-        # persistence machinery, or by add(), or whose access caused a
-        # ReadConflictError (just to be able to clean them up from the
-        # cache on abort with the other modified objects). All objects
-        # of this list are either in _cache or in _added.
-        self._registered_objects = []
-
-        # Dict of oid->obj added explicitly through add(). Used as a
-        # preliminary cache until commit time when objects are all moved
-        # to the real _cache. The objects are moved to _creating at
-        # commit time.
-        self._added = {}
-
-        # During commit this is turned into a list, which receives
-        # objects added as a side-effect of storing a modified object.
-        self._added_during_commit = None
-
-        # During commit, all objects go to either _modified or _creating:
-
-        # Dict of oid->flag of new objects (without serial), either
-        # added by add() or implicitly added (discovered by the
-        # serializer during commit). The flag is True for implicit
-        # adding. Used during abort to remove created objects from the
-        # _cache, and by persistent_id to check that a new object isn't
-        # reachable from multiple databases.
-        self._creating = {}
-
-        # List of oids of modified objects, which have to be invalidated
-        # in the cache on abort and in other connections on finish.
-        self._modified = []
-
-
-        # _invalidated queues invalidate messages delivered from the DB
-        # _inv_lock prevents one thread from modifying the set while
-        # another is processing invalidations.  All the invalidations
-        # from a single transaction should be applied atomically, so
-        # the lock must be held when reading _invalidated.
-
-        # It sucks that we have to hold the lock to read _invalidated.
-        # Normally, _invalidated is written by calling dict.update, which
-        # will execute atomically by virtue of the GIL.  But some storage
-        # might generate oids where hash or compare invokes Python code.  In
-        # that case, the GIL can't save us.
-        # Note:  since that was written, it was officially declared that the
-        # type of an oid is str.  TODO:  remove the related now-unnecessary
-        # critical sections (if any -- this needs careful thought).
-
-        self._inv_lock = threading.Lock()
-        self._invalidated = set()
-
-        # Flag indicating whether the cache has been invalidated:
-        self._invalidatedCache = False
-
-        # We intend to prevent committing a transaction in which
-        # ReadConflictError occurs.  _conflicts is the set of oids that
-        # experienced ReadConflictError.  Any time we raise ReadConflictError,
-        # the oid should be added to this set, and we should be sure that the
-        # object is registered.  Because it's registered, Connection.commit()
-        # will raise ReadConflictError again (because the oid is in
-        # _conflicts).
-        self._conflicts = {}
-
-        # If MVCC is enabled, then _mvcc is True and _txn_time stores
-        # the upper bound on transactions visible to this connection.
-        # That is, all object revisions must be written before _txn_time.
-        # If it is None, then the current revisions are acceptable.
-        # If the connection is in a version, mvcc will be disabled, because
-        # loadBefore() only returns non-version data.
-        self._txn_time = None
-
-        # To support importFile(), implemented in the ExportImport base
-        # class, we need to run _importDuringCommit() from our commit()
-        # method.  If _import is not None, it is a two-tuple of arguments
-        # to pass to _importDuringCommit().
-        self._import = None
-
-        self._reader = ObjectReader(self, self._cache, self._db.classFactory)
-
-
-    def add(self, obj):
-        """Add a new object 'obj' to the database and assign it an oid."""
-        if self._opened is None:
-            raise ConnectionStateError("The database connection is closed")
-
-        marker = object()
-        oid = getattr(obj, "_p_oid", marker)
-        if oid is marker:
-            raise TypeError("Only first-class persistent objects may be"
-                            " added to a Connection.", obj)
-        elif obj._p_jar is None:
-            assert obj._p_oid is None
-            oid = obj._p_oid = self._storage.new_oid()
-            obj._p_jar = self
-            if self._added_during_commit is not None:
-                self._added_during_commit.append(obj)
-            self._register(obj)
-            # Add to _added after calling register(), so that _added
-            # can be used as a test for whether the object has been
-            # registered with the transaction.
-            self._added[oid] = obj
-        elif obj._p_jar is not self:
-            raise InvalidObjectReference(obj, obj._p_jar)
-
-    def get(self, oid):
-        """Return the persistent object with oid 'oid'."""
-        if self._opened is None:
-            raise ConnectionStateError("The database connection is closed")
-
-        obj = self._cache.get(oid, None)
-        if obj is not None:
-            return obj
-        obj = self._added.get(oid, None)
-        if obj is not None:
-            return obj        
-        obj = self._pre_cache.get(oid, None)
-        if obj is not None:
-            return obj
-
-        # This appears to be an MVCC violation because we are loading
-        # the most recent data when perhaps we shouldn't. The key is
-        # that we are only creating a ghost!        
-        p, serial = self._storage.load(oid, self._version)
-        obj = self._reader.getGhost(p)
-
-        # Avoid infinite loop if obj tries to load its state before
-        # it is added to the cache and its state refers to it.
-        self._pre_cache[oid] = obj
-        obj._p_oid = oid
-        obj._p_jar = self
-        obj._p_changed = None
-        obj._p_serial = serial
-        self._pre_cache.pop(oid)
-        self._cache[oid] = obj
-        return obj
-
-    def cacheMinimize(self):
-        """Deactivate all unmodified objects in the cache."""
-        self._cache.minimize()
-
-    # TODO: we should test what happens when cacheGC is called mid-transaction.
-    def cacheGC(self):
-        """Reduce cache size to target size."""
-        self._cache.incrgc()
-
-    __onCloseCallbacks = None
-    def onCloseCallback(self, f):
-        """Register a callable, f, to be called by close()."""
-        if self.__onCloseCallbacks is None:
-            self.__onCloseCallbacks = []
-        self.__onCloseCallbacks.append(f)
-
-    def close(self, primary=True):
-        """Close the Connection."""
-        if not self._needs_to_join:
-            # We're currently joined to a transaction.
-            raise ConnectionStateError("Cannot close a connection joined to "
-                                       "a transaction")
-
-        if self._cache is not None:
-            self._cache.incrgc() # This is a good time to do some GC
-
-        # Call the close callbacks.
-        if self.__onCloseCallbacks is not None:
-            for f in self.__onCloseCallbacks:
-                try:
-                    f()
-                except: # except what?
-                    f = getattr(f, 'im_self', f)
-                    self._log.error("Close callback failed for %s", f,
-                                    exc_info=sys.exc_info())
-            self.__onCloseCallbacks = None
-
-        self._debug_info = ()
-
-        if self._opened:
-            self.transaction_manager.unregisterSynch(self)
-
-        if primary:
-            for connection in self.connections.values():
-                if connection is not self:
-                    connection.close(False)
-
-            # Return the connection to the pool.
-            if self._opened is not None:
-                self._db._returnToPool(self)
-
-                # _returnToPool() set self._opened to None.
-                # However, we can't assert that here, because self may
-                # have been reused (by another thread) by the time we
-                # get back here.
-        else:
-            self._opened = None
-            am = self._db._activity_monitor
-            if am is not None:
-                am.closedConnection(self)
-
-    def db(self):
-        """Returns a handle to the database this connection belongs to."""
-        return self._db
-
-    def isReadOnly(self):
-        """Returns True if the storage for this connection is read only."""
-        if self._opened is None:
-            raise ConnectionStateError("The database connection is closed")
-        return self._storage.isReadOnly()
-
-    def invalidate(self, tid, oids):
-        """Notify the Connection that transaction 'tid' invalidated oids."""
-        self._inv_lock.acquire()
-        try:
-            if self._txn_time is None:
-                self._txn_time = tid
-            self._invalidated.update(oids)
-        finally:
-            self._inv_lock.release()
-
-    def invalidateCache(self):
-        self._inv_lock.acquire()
-        try:
-            self._invalidatedCache = True
-        finally:
-            self._inv_lock.release()
-        
-
-    def root(self):
-        """Return the database root object."""
-        return self.get(z64)
-
-    def getVersion(self):
-        """Returns the version this connection is attached to."""
-        if self._storage is None:
-            raise ConnectionStateError("The database connection is closed")
-        return self._version
-
-    def get_connection(self, database_name):
-        """Return a Connection for the named database."""
-        connection = self.connections.get(database_name)
-        if connection is None:
-            new_con = self._db.databases[database_name].open(
-                transaction_manager=self.transaction_manager,
-                version=self._version,
-                )
-            self.connections.update(new_con.connections)
-            new_con.connections = self.connections
-            connection = new_con
-        return connection
-
-    def _implicitlyAdding(self, oid):
-        """Are we implicitly adding an object within the current transaction
-
-        This is used in a check to avoid implicitly adding an object
-        to a database in a multi-database situation.
-        See serialize.ObjectWriter.persistent_id.
-
-        """
-        return (self._creating.get(oid, 0)
-                or
-                ((self._savepoint_storage is not None)
-                 and
-                 self._savepoint_storage.creating.get(oid, 0)
-                 )
-                )
-
-    def sync(self):
-        """Manually update the view on the database."""
-        self.transaction_manager.abort()
-        self._storage_sync()
-
-    def getDebugInfo(self):
-        """Returns a tuple with different items for debugging the
-        connection.
-        """
-        return self._debug_info
-
-    def setDebugInfo(self, *args):
-        """Add the given items to the debug information of this connection."""
-        self._debug_info = self._debug_info + args
-
-    def getTransferCounts(self, clear=False):
-        """Returns the number of objects loaded and stored."""
-        res = self._load_count, self._store_count
-        if clear:
-            self._load_count = 0
-            self._store_count = 0
-        return res
-
-    # Connection methods
-    ##########################################################################
-
-    ##########################################################################
-    # Data manager (ISavepointDataManager) methods
-
-    def abort(self, transaction):
-        """Abort a transaction and forget all changes."""
-
-        # The order is important here.  We want to abort registered
-        # objects before we process the cache.  Otherwise, we may un-add
-        # objects added in savepoints.  If they've been modified since
-        # the savepoint, then they won't have _p_oid or _p_jar after
-        # they've been unadded. This will make the code in _abort
-        # confused.
-
-        self._abort()
-
-        if self._savepoint_storage is not None:
-            self._abort_savepoint()
-
-        self._tpc_cleanup()
-
-    def _abort(self):
-        """Abort a transaction and forget all changes."""
-
-        for obj in self._registered_objects:
-            oid = obj._p_oid
-            assert oid is not None
-            if oid in self._added:
-                del self._added[oid]
-                del obj._p_jar
-                del obj._p_oid
-            else:
-
-                # Note: If we invalidate a non-ghostifiable object
-                # (i.e. a persistent class), the object will
-                # immediately reread its state.  That means that the
-                # following call could result in a call to
-                # self.setstate, which, of course, must succeed.
-                # In general, it would be better if the read could be
-                # delayed until the start of the next transaction.  If
-                # we read at the end of a transaction and if the
-                # object was invalidated during this transaction, then
-                # we'll read non-current data, which we'll discard
-                # later in transaction finalization.  Unfortunately, we
-                # can only delay the read if this abort corresponds to
-                # a top-level-transaction abort.  We can't tell if
-                # this is a top-level-transaction abort, so we have to
-                # go ahead and invalidate now.  Fortunately, it's
-                # pretty unlikely that the object we are invalidating
-                # was invalidated by another thread, so the risk of a
-                # reread is pretty low.
-
-                self._cache.invalidate(oid)
-
-    def _tpc_cleanup(self):
-        """Performs cleanup operations to support tpc_finish and tpc_abort."""
-        self._conflicts.clear()
-        self._needs_to_join = True
-        self._registered_objects = []
-        self._creating.clear()
-
-    # Process pending invalidations.
-    def _flush_invalidations(self):
-        self._inv_lock.acquire()
-        try:
-            # Non-ghostifiable objects may need to read when they are
-            # invalidated, so we'll quickly just replace the
-            # invalidating dict with a new one.  We'll then process
-            # the invalidations after freeing the lock *and* after
-            # resetting the time.  This means that invalidations will
-            # happen after the start of the transactions.  They are
-            # subject to conflict errors and to reading old data.
-
-            # TODO: There is a potential problem lurking for persistent
-            # classes.  Suppose we have an invalidation of a persistent
-            # class and of an instance.  If the instance is
-            # invalidated first and if the invalidation logic uses
-            # data read from the class, then the invalidation could
-            # be performed with stale data.  Or, suppose that there
-            # are instances of the class that are freed as a result of
-            # invalidating some object.  Perhaps code in their __del__
-            # uses class data.  Really, the only way to properly fix
-            # this is to, in fact, make classes ghostifiable.  Then
-            # we'd have to reimplement attribute lookup to check the
-            # class state and, if necessary, activate the class.  It's
-            # much worse than that though, because we'd also need to
-            # deal with slots.  When a class is ghostified, we'd need
-            # to replace all of the slot operations with versions that
-            # reloaded the object when called. It's hard to say which
-                # is better or worse.  For now, the risk of
-            # using a class while objects are being invalidated seems
-            # small enough to be acceptable.
-
-            invalidated = dict.fromkeys(self._invalidated)
-            self._invalidated = set()
-            self._txn_time = None
-            if self._invalidatedCache:
-                self._invalidatedCache = False
-                invalidated = self._cache.cache_data.copy()
-        finally:
-            self._inv_lock.release()
-
-        self._cache.invalidate(invalidated)
-
-        # Now is a good time to collect some garbage.
-        self._cache.incrgc()
-
-    def tpc_begin(self, transaction):
-        """Begin commit of a transaction, starting the two-phase commit."""
-        self._modified = []
-
-        # _creating is a dict mapping the oids of new objects to an
-        # implicitly-added flag; it is used to remove them from the
-        # cache if a transaction aborts.
-        self._creating.clear()
-        self._normal_storage.tpc_begin(transaction)
-
-    def commit(self, transaction):
-        """Commit changes to an object"""
-
-        if self._savepoint_storage is not None:
-
-            # We first checkpoint the current changes to the savepoint
-            self.savepoint()
-
-            # then commit all of the savepoint changes at once
-            self._commit_savepoint(transaction)
-
-            # No need to call _commit since savepoint did.
-
-        else:
-            self._commit(transaction)
-
-    def _commit(self, transaction):
-        """Commit changes to an object"""
-
-        if self._import:
-            # We are importing an export file. We always do this
-            # while making a savepoint so we can copy export data
-            # directly to our storage, typically a TmpStore.
-            self._importDuringCommit(transaction, *self._import)
-            self._import = None
-
-        # Just in case an object is added as a side-effect of storing
-        # a modified object.  If, for example, a __getstate__() method
-        # calls add(), the newly added objects will show up in
-        # _added_during_commit.  This sounds insane, but has actually
-        # happened.
-
-        self._added_during_commit = []
-
-        if self._invalidatedCache:
-            raise ConflictError()            
-
-        for obj in self._registered_objects:
-            oid = obj._p_oid
-            assert oid
-            if oid in self._conflicts:
-                raise ReadConflictError(object=obj)
-
-            if obj._p_jar is not self:
-                raise InvalidObjectReference(obj, obj._p_jar)
-            elif oid in self._added:
-                assert obj._p_serial == z64
-            elif obj._p_changed:
-                if oid in self._invalidated:
-                    resolve = getattr(obj, "_p_resolveConflict", None)
-                    if resolve is None:
-                        raise ConflictError(object=obj)
-                self._modified.append(oid)
-            else:
-                # Nothing to do.  It's been said that it's legal, e.g., for
-                # an object to set _p_changed to false after it's been
-                # changed and registered.
-                continue
-
-            self._store_objects(ObjectWriter(obj), transaction)
-
-        for obj in self._added_during_commit:
-            self._store_objects(ObjectWriter(obj), transaction)
-        self._added_during_commit = None
-
-    def _store_objects(self, writer, transaction):
-        for obj in writer:
-            oid = obj._p_oid
-            serial = getattr(obj, "_p_serial", z64)
-
-            if ((serial == z64)
-                and
-                ((self._savepoint_storage is None)
-                 or (oid not in self._savepoint_storage.creating)
-                 or self._savepoint_storage.creating[oid]
-                 )
-                ):
-                
-                # obj is a new object
-
-                # Because obj was added, it is now in _creating, so it
-                # can be removed from _added.  If oid wasn't in
-                # _added, then we are adding it implicitly.
-
-                implicitly_adding = self._added.pop(oid, None) is None
-
-                self._creating[oid] = implicitly_adding
-
-            else:
-                if (oid in self._invalidated
-                    and not hasattr(obj, '_p_resolveConflict')):
-                    raise ConflictError(object=obj)
-                self._modified.append(oid)
-            p = writer.serialize(obj)  # This calls __getstate__ of obj
-
-            if isinstance(obj, Blob):
-                if not IBlobStorage.providedBy(self._storage):
-                    raise Unsupported(
-                        "Storing Blobs in %s is not supported." % 
-                        repr(self._storage))
-                if obj.opened():
-                    raise ValueError("Can't commit with opened blobs.")
-                s = self._storage.storeBlob(oid, serial, p,
-                                            obj._uncommitted(),
-                                            self._version, transaction)
-                # we invalidate the object here in order to ensure
-                # that the next attribute access of its name
-                # unghostifies it, which will cause its blob data
-                # to be reattached "cleanly"
-                obj._p_invalidate()
-            else:
-                s = self._storage.store(oid, serial, p, self._version,
-                                        transaction)
-            self._store_count += 1
-            # Put the object in the cache before handling the
-            # response, just in case the response contains the
-            # serial number for a newly created object
-            try:
-                self._cache[oid] = obj
-            except:
-                # Dang, I bet it's wrapped:
-                # TODO:  Deprecate, then remove, this.
-                if hasattr(obj, 'aq_base'):
-                    self._cache[oid] = obj.aq_base
-                else:
-                    raise
-
-            self._handle_serial(s, oid)
-
-    def _handle_serial(self, store_return, oid=None, change=1):
-        """Handle the returns from store() and tpc_vote() calls."""
-
-        # These calls can return different types depending on whether
-        # ZEO is used.  ZEO uses asynchronous returns that may be
-        # returned in batches by the ClientStorage.  ZEO1 can also
-        # return an exception object and expect that the Connection
-        # will raise the exception.
-
-        # When conflict resolution occurs, the object state held by
-        # the connection does not match what is written to the
-        # database.  Invalidate the object here to guarantee that
-        # the new state is read the next time the object is used.
-
-        if not store_return:
-            return
-        if isinstance(store_return, str):
-            assert oid is not None
-            self._handle_one_serial(oid, store_return, change)
-        else:
-            for oid, serial in store_return:
-                self._handle_one_serial(oid, serial, change)
-
-    def _handle_one_serial(self, oid, serial, change):
-        if not isinstance(serial, str):
-            raise serial
-        obj = self._cache.get(oid, None)
-        if obj is None:
-            return
-        if serial == ResolvedSerial:
-            del obj._p_changed # transition from changed to ghost
-        else:
-            if change:
-                obj._p_changed = 0 # transition from changed to up-to-date
-            obj._p_serial = serial
-
-    def tpc_abort(self, transaction):
-        if self._import:
-            self._import = None
-
-        if self._savepoint_storage is not None:
-            self._abort_savepoint()
-
-        self._storage.tpc_abort(transaction)
-
-        # Note: If we invalidate a non-ghostifiable object (i.e. a
-        # persistent class), the object will immediately reread its
-        # state.  That means that the following call could result in a
-        # call to self.setstate, which, of course, must succeed.  In
-        # general, it would be better if the read could be delayed
-        # until the start of the next transaction.  If we read at the
-        # end of a transaction and if the object was invalidated
-        # during this transaction, then we'll read non-current data,
-        # which we'll discard later in transaction finalization.  We
-        # could, theoretically queue this invalidation by calling
-        # self.invalidate.  Unfortunately, attempts to make that
-        # change resulted in mysterious test failures.  It's pretty
-        # unlikely that the object we are invalidating was invalidated
-        # by another thread, so the risk of a reread is pretty low.
-        # It's really not worth the effort to pursue this.
-
-        self._cache.invalidate(self._modified)
-        self._invalidate_creating()
-        while self._added:
-            oid, obj = self._added.popitem()
-            del obj._p_oid
-            del obj._p_jar
-        self._tpc_cleanup()
-
-    def _invalidate_creating(self, creating=None):
-        """Disown any objects newly saved in an uncommitted transaction."""
-        if creating is None:
-            creating = self._creating
-            self._creating = {}
-
-        for oid in creating:
-            o = self._cache.get(oid)
-            if o is not None:
-                del self._cache[oid]
-                del o._p_jar
-                del o._p_oid
-
-    def tpc_vote(self, transaction):
-        """Verify that a data manager can commit the transaction."""
-        try:
-            vote = self._storage.tpc_vote
-        except AttributeError:
-            return
-        s = vote(transaction)
-        self._handle_serial(s)
-
-    def tpc_finish(self, transaction):
-        """Indicate confirmation that the transaction is done."""
-
-        def callback(tid):
-            d = dict.fromkeys(self._modified)
-            self._db.invalidate(tid, d, self)
-        # It's important that the storage calls the passed function
-        # while it still has its lock.  We don't want another thread
-        # to be able to read any updated data until we've had a chance
-        # to send an invalidation message to all of the other
-        # connections!
-        self._storage.tpc_finish(transaction, callback)
-        self._tpc_cleanup()
-
-    def sortKey(self):
-        """Return a consistent sort key for this connection."""
-        return "%s:%s" % (self._storage.sortKey(), id(self))
-
-    # Data manager (ISavepointDataManager) methods
-    ##########################################################################
-
-    ##########################################################################
-    # Transaction-manager synchronization -- ISynchronizer
-
-    def beforeCompletion(self, txn):
-        # We don't do anything before a commit starts.
-        pass
-
-    # Call the underlying storage's sync() method (if any), and process
-    # pending invalidations regardless.  Of course this should only be
-    # called at transaction boundaries.
-    def _storage_sync(self, *ignored):
-        sync = getattr(self._storage, 'sync', 0)
-        if sync:
-            sync()
-        self._flush_invalidations()
-
-    afterCompletion =  _storage_sync
-    newTransaction = _storage_sync
-
-    # Transaction-manager synchronization -- ISynchronizer
-    ##########################################################################
-
-    ##########################################################################
-    # persistent.interfaces.IPersistentDatamanager
-
-    def oldstate(self, obj, tid):
-        """Return copy of 'obj' that was written by transaction 'tid'."""
-        assert obj._p_jar is self
-        p = self._storage.loadSerial(obj._p_oid, tid)
-        return self._reader.getState(p)
-
-    def setstate(self, obj):
-        """Turns the ghost 'obj' into a real object by loading its state from
-        the database."""
-        oid = obj._p_oid
-
-        if self._opened is None:
-            msg = ("Shouldn't load state for %s "
-                   "when the connection is closed" % oid_repr(oid))
-            self._log.error(msg)
-            raise ConnectionStateError(msg)
-
-        try:
-            self._setstate(obj)
-        except ConflictError:
-            raise
-        except:
-            self._log.error("Couldn't load state for %s", oid_repr(oid),
-                            exc_info=sys.exc_info())
-            raise
-
-    def _setstate(self, obj):
-        # Helper for setstate(), which provides logging of failures.
-
-        # The control flow is complicated here to avoid loading an
-        # object revision that we are sure we aren't going to use.  As
-        # a result, invalidation tests occur before and after the
-        # load.  We can only be sure about invalidations after the
-        # load.
-
-        # If an object has been invalidated, there are several cases
-        # to consider:
-        # 1. Check _p_independent()
-        # 2. Try MVCC
-        # 3. Raise ConflictError.
-
-        # Does anything actually use _p_independent()?  It would simplify
-        # the code if we could drop support for it.  
-        # (BTrees.Length does.)
-
-        # There is a harmless data race with self._invalidated.  A
-        # dict update could go on in another thread, but we don't care
-        # because we have to check again after the load anyway.
-
-
-        if self._invalidatedCache:
-            raise ReadConflictError()
-
-        if (obj._p_oid in self._invalidated and
-                not myhasattr(obj, "_p_independent")):
-            # If the object has _p_independent(), we will handle it below.
-            self._load_before_or_conflict(obj)
-            return
-
-        p, serial = self._storage.load(obj._p_oid, self._version)
-        self._load_count += 1
-
-        self._inv_lock.acquire()
-        try:
-            invalid = obj._p_oid in self._invalidated
-        finally:
-            self._inv_lock.release()
-
-        if invalid:
-            if myhasattr(obj, "_p_independent"):
-                # This call will raise a ReadConflictError if something
-                # goes wrong
-                self._handle_independent(obj)
-            else:
-                self._load_before_or_conflict(obj)
-                return
-
-        self._reader.setGhostState(obj, p)
-        obj._p_serial = serial
-
-        # Blob support
-        if isinstance(obj, Blob):
-            obj._p_blob_uncommitted = None
-            obj._p_blob_committed = self._storage.loadBlob(obj._p_oid, serial)
-
-    def _load_before_or_conflict(self, obj):
-        """Load non-current state for obj or raise ReadConflictError."""
-        if not ((not self._version) and self._setstate_noncurrent(obj)):
-            self._register(obj)
-            self._conflicts[obj._p_oid] = True
-            raise ReadConflictError(object=obj)
-
-    def _setstate_noncurrent(self, obj):
-        """Set state using non-current data.
-
-        Return True if state was available, False if not.
-        """
-        try:
-            # Load data that was current before the commit at txn_time.
-            t = self._storage.loadBefore(obj._p_oid, self._txn_time)
-        except KeyError:
-            return False
-        if t is None:
-            return False
-        data, start, end = t
-        # The non-current transaction must have been written before
-        # txn_time.  It must be current at txn_time, but could have
-        # been modified at txn_time.
-
-        assert start < self._txn_time, (u64(start), u64(self._txn_time))
-        assert end is not None
-        assert self._txn_time <= end, (u64(self._txn_time), u64(end))
-        self._reader.setGhostState(obj, data)
-        obj._p_serial = start
-        return True
-
-    def _handle_independent(self, obj):
-        # Helper method for setstate() handles possibly independent objects
-        # Call _p_independent(), if it returns True, setstate() wins.
-        # Otherwise, raise a ConflictError.
-
-        if obj._p_independent():
-            self._inv_lock.acquire()
-            try:
-                try:
-                    self._invalidated.remove(obj._p_oid)
-                except KeyError:
-                    pass
-            finally:
-                self._inv_lock.release()
-        else:
-            self._conflicts[obj._p_oid] = 1
-            self._register(obj)
-            raise ReadConflictError(object=obj)
-
-    def register(self, obj):
-        """Register obj with the current transaction manager.
-
-        A subclass could override this method to customize the default
-        policy of one transaction manager for each thread.
-
-        obj must be an object loaded from this Connection.
-        """
-        assert obj._p_jar is self
-        if obj._p_oid is None:
-            # The actual complaint here is that an object without
-            # an oid is being registered.  I can't think of any way to
-            # achieve that without assignment to _p_jar.  If there is
-            # a way, this will be a very confusing exception.
-            raise ValueError("assigning to _p_jar is not supported")
-        elif obj._p_oid in self._added:
-            # It was registered before it was added to _added.
-            return
-        self._register(obj)
-
-    def _register(self, obj=None):
-
-        # The order here is important.  We need to join before
-        # registering the object, because joining may take a
-        # savepoint, and the savepoint should not reflect the change
-        # to the object.
-
-        if self._needs_to_join:
-            self.transaction_manager.get().join(self)
-            self._needs_to_join = False
-
-        if obj is not None:
-            self._registered_objects.append(obj)
-
-
-    # persistent.interfaces.IPersistentDatamanager
-    ##########################################################################
-
-    ##########################################################################
-    # PROTECTED stuff (used by e.g. ZODB.DB.DB)
-
-    def _cache_items(self):
-        # find all items on the lru list
-        items = self._cache.lru_items()
-        # find everything: some on the lru list, some not
-        everything = self._cache.cache_data
-        # remove those items that are on the lru list
-        for k,v in items:
-            del everything[k]
-        # return a list of [ghosts....not recently used.....recently used]
-        return everything.items() + items
-
-    def open(self, transaction_manager=None, delegate=True):
-        """Register odb, the DB that this Connection uses.
-
-        This method is called by the DB every time a Connection
-        is opened.  Any invalidations received while the Connection
-        was closed will be processed.
-
-        If the global module function resetCaches() was called, the
-        cache will be cleared.
-
-        Parameters:
-        transaction_manager: transaction manager to use.  None means
-            use the default transaction manager.
-        delegate: if True, also open the secondary connections of a
-            multi-database.
-        """
-
-        self._opened = time()
-
-        if transaction_manager is None:
-            transaction_manager = transaction.manager
-
-        self.transaction_manager = transaction_manager
-
-        if self._reset_counter != global_reset_counter:
-            # New code is in place.  Start a new cache.
-            self._resetCache()
-        else:
-            self._flush_invalidations()
-
-        transaction_manager.registerSynch(self)
-
-        if self._cache is not None:
-            self._cache.incrgc() # This is a good time to do some GC
-
-        if delegate:
-            # delegate open to secondary connections
-            for connection in self.connections.values():
-                if connection is not self:
-                    connection.open(transaction_manager, False)
-
-    def _resetCache(self):
-        """Creates a new cache, discarding the old one.
-
-        See the docstring for the resetCaches() function.
-        """
-        self._reset_counter = global_reset_counter
-        self._invalidated.clear()
-        self._invalidatedCache = False
-        cache_size = self._cache.cache_size
-        self._cache = cache = PickleCache(self, cache_size)
-
-    ##########################################################################
-    # Python protocol
-
-    def __repr__(self):
-        if self._version:
-            ver = ' (in version %s)' % `self._version`
-        else:
-            ver = ''
-        return '<Connection at %08x%s>' % (positive_id(self), ver)
-
-    # Python protocol
-    ##########################################################################
-
-    ##########################################################################
-    # DEPRECATION candidates
-
-    __getitem__ = get
-
-    def modifiedInVersion(self, oid):
-        """Returns the version the object with the given oid was modified in.
-
-        If it wasn't modified in a version, the current version of this
-        connection is returned.
-        """
-        try:
-            return self._db.modifiedInVersion(oid)
-        except KeyError:
-            return self.getVersion()
-
-    def exchange(self, old, new):
-        # called by a ZClasses method that isn't executed by the test suite
-        oid = old._p_oid
-        new._p_oid = oid
-        new._p_jar = self
-        new._p_changed = 1
-        self._register(new)
-        self._cache[oid] = new
-
-    # DEPRECATION candidates
-    ##########################################################################
-
-    ##########################################################################
-    # DEPRECATED methods
-
-    # None at present.
-
-    # DEPRECATED methods
-    ##########################################################################
-
-    #####################################################################
-    # Savepoint support
-
-    def savepoint(self):
-        if self._savepoint_storage is None:
-            tmpstore = TmpStore(self._version, self._normal_storage)
-            self._savepoint_storage = tmpstore
-            self._storage = self._savepoint_storage
-
-        self._creating.clear()
-        self._commit(None)
-        self._storage.creating.update(self._creating)
-        self._creating.clear()
-        self._registered_objects = []
-
-        state = self._storage.position, self._storage.index.copy()
-        result = Savepoint(self, state)
-        # While the interface doesn't guarantee this, savepoints are
-        # sometimes used just to "break up" very long transactions, and as
-        # a pragmatic matter this is a good time to reduce the cache
-        # memory burden.
-        self.cacheGC()
-        return result
-
-    def _rollback(self, state):
-        self._abort()
-        self._registered_objects = []
-        src = self._storage
-        self._cache.invalidate(src.index)
-        src.reset(*state)
-
-    def _commit_savepoint(self, transaction):
-        """Commit all changes made in savepoints and begin 2-phase commit
-        """
-        src = self._savepoint_storage
-        self._storage = self._normal_storage
-        self._savepoint_storage = None
-
-        self._log.debug("Committing savepoints of size %s", src.getSize())
-        oids = src.index.keys()
-
-        # Copy invalidating and creating info from temporary storage:
-        self._modified.extend(oids)
-        self._creating.update(src.creating)
-
-        for oid in oids:
-            data, serial = src.load(oid, src)
-            if isinstance(self._reader.getGhost(data), Blob):
-                blobfilename = src.loadBlob(oid, serial)
-                s = self._storage.storeBlob(oid, serial, data, blobfilename,
-                                            self._version, transaction)
-                # we invalidate the object here in order to ensure
-                # that the next attribute access of its name
-                # unghostifies it, which will cause its blob data
-                # to be reattached "cleanly"
-                self.invalidate(s, {oid:True})
-            else:
-                s = self._storage.store(oid, serial, data,
-                                        self._version, transaction)
-
-            self._handle_serial(s, oid, change=False)
-        src.close()
-
-    def _abort_savepoint(self):
-        """Discard all savepoint data."""
-        src = self._savepoint_storage
-        self._storage = self._normal_storage
-        self._savepoint_storage = None
-
-        # Note: If we invalidate a non-ghostifiable object (i.e. a
-        # persistent class), the object will immediately reread its
-        # state.  That means that the following call could result in a
-        # call to self.setstate, which, of course, must succeed.  In
-        # general, it would be better if the read could be delayed
-        # until the start of the next transaction.  If we read at the
-        # end of a transaction and if the object was invalidated
-        # during this transaction, then we'll read non-current data,
-        # which we'll discard later in transaction finalization.  We
-        # could, theoretically queue this invalidation by calling
-        # self.invalidate.  Unfortunately, attempts to make that
-        # change resulted in mysterious test failures.  It's pretty
-        # unlikely that the object we are invalidating was invalidated
-        # by another thread, so the risk of a reread is pretty low.
-        # It's really not worth the effort to pursue this.
-
-        self._cache.invalidate(src.index)
-        self._invalidate_creating(src.creating)
-        src.close()
-
-    # Savepoint support
-    #####################################################################
-
-class Savepoint:
-
-    implements(IDataManagerSavepoint)
-
-    def __init__(self, datamanager, state):
-        self.datamanager = datamanager
-        self.state = state
-
-    def rollback(self):
-        self.datamanager._rollback(self.state)
-
-class TmpStore:
-    """A storage-like thing to support savepoints."""
-
-    implements(IBlobStorage)
-
-    def __init__(self, base_version, storage):
-        self._storage = storage
-        for method in (
-            'getName', 'new_oid', 'getSize', 'sortKey', 'loadBefore',
-            ):
-            setattr(self, method, getattr(storage, method))
-
-        try:
-            supportsVersions = storage.supportsVersions
-        except AttributeError:
-            pass
-        else:
-            if supportsVersions():
-                self.modifiedInVersion = storage.modifiedInVersion
-                self.versionEmpty = storage.versionEmpty
-
-        self._base_version = base_version
-        self._file = tempfile.TemporaryFile()
-        # position: current file position
-        # _tpos: file position at last commit point
-        self.position = 0L
-        # index: map oid to pos of last committed version
-        self.index = {}
-        self.creating = {}
-
-    def __len__(self):
-        return len(self.index)
-
-    def close(self):
-        self._file.close()
-
-    def load(self, oid, version):
-        pos = self.index.get(oid)
-        if pos is None:
-            return self._storage.load(oid, self._base_version)
-        self._file.seek(pos)
-        h = self._file.read(8)
-        oidlen = u64(h)
-        read_oid = self._file.read(oidlen)
-        if read_oid != oid:
-            raise POSException.StorageSystemError('Bad temporary storage')
-        h = self._file.read(16)
-        size = u64(h[8:])
-        serial = h[:8]
-        return self._file.read(size), serial
-
-    def store(self, oid, serial, data, version, transaction):
-        # we have this funny signature so we can reuse the normal non-commit
-        # commit logic
-        assert version == self._base_version
-        self._file.seek(self.position)
-        l = len(data)
-        if serial is None:
-            serial = z64
-        header = p64(len(oid)) + oid + serial + p64(l)
-        self._file.write(header)
-        self._file.write(data)
-        self.index[oid] = self.position
-        self.position += l + len(header)
-        return serial
-
-    def storeBlob(self, oid, serial, data, blobfilename, version,
-                  transaction):
-        serial = self.store(oid, serial, data, version, transaction)
-
-        targetpath = self._getBlobPath()
-        if not os.path.exists(targetpath):
-            os.makedirs(targetpath, 0700)
-
-        targetname = self._getCleanFilename(oid, serial)
-        rename_or_copy_blob(blobfilename, targetname, chmod=False)
-
-    def loadBlob(self, oid, serial):
-        """Return the filename where the blob file can be found.
-        """
-        if not IBlobStorage.providedBy(self._storage):
-            raise Unsupported(
-                "Blobs are not supported by the underlying storage %r." %
-                self._storage)
-        filename = self._getCleanFilename(oid, serial)
-        if not os.path.exists(filename):
-            return self._storage.loadBlob(oid, serial)
-        return filename
-
-    def _getBlobPath(self):
-        return os.path.join(self.temporaryDirectory(), 'savepoints')
-
-    def _getCleanFilename(self, oid, tid):
-        return os.path.join(self._getBlobPath(),
-                            "%s-%s%s" % (utils.oid_repr(oid), utils.tid_repr(tid), SAVEPOINT_SUFFIX,)
-                            )
-
-    def temporaryDirectory(self):
-        return self._storage.temporaryDirectory()
-
-    def reset(self, position, index):
-        self._file.truncate(position)
-        self.position = position
-        # Caution:  We're typically called as part of a savepoint rollback.
-        # Other machinery remembers the index to restore, and passes it to
-        # us.  If we simply bind self.index to `index`, then if the caller
-        # didn't pass a copy of the index, the caller's index will mutate
-        # when self.index mutates.  This can be a disaster if the caller is a
-        # savepoint to which the user rolls back again later (the savepoint
-        # loses the original index it passed).  Therefore, to be safe, we make
-        # a copy of the index here.  An alternative would be to ensure that
-        # all callers pass copies.  As is, our callers do not make copies.
-        self.index = index.copy()

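The Connection.py shown above carries the savepoint machinery
(Connection.savepoint(), Savepoint, TmpStore) forward into the tag. A
minimal usage sketch against that API, assuming a standard
FileStorage setup (the 'Data.fs' filename is illustrative only):

    # Minimal savepoint sketch; 'Data.fs' is a hypothetical storage
    # file, not part of this checkin.
    import transaction
    from ZODB import DB
    from ZODB.FileStorage import FileStorage

    db = DB(FileStorage('Data.fs'))
    conn = db.open()
    root = conn.root()

    root['counter'] = 0
    sp = transaction.get().savepoint()  # calls Connection.savepoint()
    root['counter'] = 1
    sp.rollback()                       # Connection._rollback restores state
    assert root['counter'] == 0

    transaction.commit()
    conn.close()
    db.close()
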
Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/ZODB/Connection.py)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/Connection.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,1311 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+"""Database connection support
+
+$Id$"""
+
+import logging
+import sys
+import tempfile
+import threading
+import warnings
+import os
+import shutil
+from time import time
+
+from persistent import PickleCache
+
+# interfaces
+from persistent.interfaces import IPersistentDataManager
+from ZODB.interfaces import IConnection
+from ZODB.interfaces import IBlobStorage
+from ZODB.blob import Blob, rename_or_copy_blob
+from transaction.interfaces import ISavepointDataManager
+from transaction.interfaces import IDataManagerSavepoint
+from transaction.interfaces import ISynchronizer
+from zope.interface import implements
+
+import transaction
+
+from ZODB.blob import SAVEPOINT_SUFFIX
+from ZODB.ConflictResolution import ResolvedSerial
+from ZODB.ExportImport import ExportImport
+from ZODB import POSException
+from ZODB.POSException import InvalidObjectReference, ConnectionStateError
+from ZODB.POSException import ConflictError, ReadConflictError
+from ZODB.POSException import Unsupported
+from ZODB.POSException import POSKeyError
+from ZODB.serialize import ObjectWriter, ObjectReader, myhasattr
+from ZODB.utils import p64, u64, z64, oid_repr, positive_id
+from ZODB import utils
+
+global_reset_counter = 0
+
+def resetCaches():
+    """Causes all connection caches to be reset as connections are reopened.
+
+    Zope's refresh feature uses this.  When you reload Python modules,
+    instances of classes continue to use the old class definitions.
+    To use the new code immediately, the refresh feature asks ZODB to
+    clear caches by calling resetCaches().  When the instances are
+    loaded by subsequent connections, they will use the new class
+    definitions.
+    """
+    global global_reset_counter
+    global_reset_counter += 1
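+
+# A minimal sketch of how a code-reload framework might use this (the
+# surrounding reload machinery is hypothetical):
+#
+#   import ZODB.Connection
+#   reload(my_module)                    # new class definitions
+#   ZODB.Connection.resetCaches()        # request fresh caches
+#
+# Each Connection compares its _reset_counter against
+# global_reset_counter when it is (re)opened and discards its cache if
+# they differ, so the new classes take effect on the next open().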
+
+class Connection(ExportImport, object):
+    """Connection to ZODB for loading and storing objects."""
+
+    implements(IConnection,
+               ISavepointDataManager,
+               IPersistentDataManager,
+               ISynchronizer)
+
+
+    _code_timestamp = 0
+
+    ##########################################################################
+    # Connection methods, ZODB.IConnection
+
+    def __init__(self, db, version='', cache_size=400, cache_size_bytes=0):
+        """Create a new Connection."""
+
+        self._log = logging.getLogger('ZODB.Connection')
+        self._debug_info = ()
+
+        self._db = db
+        # Multi-database support
+        self.connections = {self._db.database_name: self}
+
+        self._version = version
+        self._normal_storage = self._storage = db._storage
+        self.new_oid = db._storage.new_oid
+        self._savepoint_storage = None
+
+        # Do we need to join a txn manager?
+        self._needs_to_join = True
+        self.transaction_manager = None
+        self._opened = None # time.time() when DB.open() opened us
+
+        self._reset_counter = global_reset_counter
+        self._load_count = 0   # Number of objects unghosted
+        self._store_count = 0  # Number of objects stored
+
+        # Cache which can ghostify (forget the state of) objects not
+        # recently used. Its API is roughly that of a dict, with
+        # additional gc-related and invalidation-related methods.
+        self._cache = PickleCache(self, cache_size, cache_size_bytes)
+
+        # The pre-cache is used by get to avoid infinite loops when
+        # objects immediately load their state as soon as their
+        # persistent data is set.
+        self._pre_cache = {}
+
+        if version:
+            # Caches for versions end up empty if the version
+            # is not used for a while. Non-version caches
+            # keep their content indefinitely.
+            # Unclear:  Why do we want version caches to behave this way?
+            self._cache.cache_drain_resistance = 100
+
+        # List of all objects (not oids) registered as modified by the
+        # persistence machinery, or by add(), or whose access caused a
+        # ReadConflictError (just to be able to clean them up from the
+        # cache on abort with the other modified objects). All objects
+        # of this list are either in _cache or in _added.
+        self._registered_objects = []
+
+        # Dict of oid->obj added explicitly through add(). Used as a
+        # preliminary cache until commit time when objects are all moved
+        # to the real _cache. The objects are moved to _creating at
+        # commit time.
+        self._added = {}
+
+        # During commit this is turned into a list, which receives
+        # objects added as a side-effect of storing a modified object.
+        self._added_during_commit = None
+
+        # During commit, all objects go to either _modified or _creating:
+
+        # Dict of oid->flag of new objects (without serial), either
+        # added by add() or implicitly added (discovered by the
+        # serializer during commit). The flag is True for implicit
+        # adding. Used during abort to remove created objects from the
+        # _cache, and by persistent_id to check that a new object isn't
+        # reachable from multiple databases.
+        self._creating = {}
+
+        # List of oids of modified objects, which have to be invalidated
+        # in the cache on abort and in other connections on finish.
+        self._modified = []
+
+
+        # _invalidated queues invalidate messages delivered from the DB
+        # _inv_lock prevents one thread from modifying the set while
+        # another is processing invalidations.  All the invalidations
+        # from a single transaction should be applied atomically, so
+        # the lock must be held when reading _invalidated.
+
+        # It sucks that we have to hold the lock to read _invalidated.
+        # Normally, _invalidated is written by calling dict.update, which
+        # will execute atomically by virtue of the GIL.  But some storage
+        # might generate oids where hash or compare invokes Python code.  In
+        # that case, the GIL can't save us.
+        # Note:  since that was written, it was officially declared that the
+        # type of an oid is str.  TODO:  remove the related now-unnecessary
+        # critical sections (if any -- this needs careful thought).
+
+        self._inv_lock = threading.Lock()
+        self._invalidated = set()
+
+        # Flag indicating whether the cache has been invalidated:
+        self._invalidatedCache = False
+
+        # We intend to prevent committing a transaction in which
+        # ReadConflictError occurs.  _conflicts is the set of oids that
+        # experienced ReadConflictError.  Any time we raise ReadConflictError,
+        # the oid should be added to this set, and we should be sure that the
+        # object is registered.  Because it's registered, Connection.commit()
+        # will raise ReadConflictError again (because the oid is in
+        # _conflicts).
+        self._conflicts = {}
+
+        # If MVCC is enabled, then _mvcc is True and _txn_time stores
+        # the upper bound on transactions visible to this connection.
+        # That is, all object revisions must be written before _txn_time.
+        # If it is None, then the current revisions are acceptable.
+        # If the connection is in a version, mvcc will be disabled, because
+        # loadBefore() only returns non-version data.
+        self._txn_time = None
+
+        # To support importFile(), implemented in the ExportImport base
+        # class, we need to run _importDuringCommit() from our commit()
+        # method.  If _import is not None, it is a two-tuple of arguments
+        # to pass to _importDuringCommit().
+        self._import = None
+
+        self._reader = ObjectReader(self, self._cache, self._db.classFactory)
+
+
+    def add(self, obj):
+        """Add a new object 'obj' to the database and assign it an oid."""
+        if self._opened is None:
+            raise ConnectionStateError("The database connection is closed")
+
+        marker = object()
+        oid = getattr(obj, "_p_oid", marker)
+        if oid is marker:
+            raise TypeError("Only first-class persistent objects may be"
+                            " added to a Connection.", obj)
+        elif obj._p_jar is None:
+            assert obj._p_oid is None
+            oid = obj._p_oid = self._storage.new_oid()
+            obj._p_jar = self
+            if self._added_during_commit is not None:
+                self._added_during_commit.append(obj)
+            self._register(obj)
+            # Add to _added after calling register(), so that _added
+            # can be used as a test for whether the object has been
+            # registered with the transaction.
+            self._added[oid] = obj
+        elif obj._p_jar is not self:
+            raise InvalidObjectReference(obj, obj._p_jar)
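+
+    # Usage sketch (MyDoc is a hypothetical Persistent subclass).
+    # add() assigns an oid and jar immediately, before commit, which
+    # is useful when the oid is needed up front:
+    #
+    #   doc = MyDoc()
+    #   connection.add(doc)    # doc._p_oid and doc._p_jar are now set
+    #   connection.root()['doc'] = doc
+    #   transaction.commit()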
+
+    def get(self, oid):
+        """Return the persistent object with oid 'oid'."""
+        if self._opened is None:
+            raise ConnectionStateError("The database connection is closed")
+
+        obj = self._cache.get(oid, None)
+        if obj is not None:
+            return obj
+        obj = self._added.get(oid, None)
+        if obj is not None:
+            return obj
+        obj = self._pre_cache.get(oid, None)
+        if obj is not None:
+            return obj
+
+        # This appears to be an MVCC violation because we are loading
+        # the most recent data when perhaps we shouldn't. The key is
+        # that we are only creating a ghost!
+        p, serial = self._storage.load(oid, self._version)
+        obj = self._reader.getGhost(p)
+
+        # Avoid an infinite loop if obj tries to load its state before
+        # it is added to the cache and its state refers to it.
+        self._pre_cache[oid] = obj
+        obj._p_oid = oid
+        obj._p_jar = self
+        obj._p_changed = None
+        obj._p_serial = serial
+        self._pre_cache.pop(oid)
+        self._cache[oid] = obj
+        return obj
+
+    def cacheMinimize(self):
+        """Deactivate all unmodified objects in the cache."""
+        self._cache.minimize()
+
+    # TODO: we should test what happens when cacheGC is called mid-transaction.
+    def cacheGC(self):
+        """Reduce cache size to target size."""
+        self._cache.incrgc()
+
+    __onCloseCallbacks = None
+    def onCloseCallback(self, f):
+        """Register a callable, f, to be called by close()."""
+        if self.__onCloseCallbacks is None:
+            self.__onCloseCallbacks = []
+        self.__onCloseCallbacks.append(f)
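+
+    # For example (release_resources is a hypothetical callable):
+    #
+    #   connection.onCloseCallback(release_resources)
+    #
+    # close() will then invoke release_resources(), logging -- but not
+    # propagating -- any exception it raises.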
+
+    def close(self, primary=True):
+        """Close the Connection."""
+        if not self._needs_to_join:
+            # We're currently joined to a transaction.
+            raise ConnectionStateError("Cannot close a connection joined to "
+                                       "a transaction")
+
+        if self._cache is not None:
+            self._cache.incrgc() # This is a good time to do some GC
+
+        # Call the close callbacks.
+        if self.__onCloseCallbacks is not None:
+            for f in self.__onCloseCallbacks:
+                try:
+                    f()
+                except: # except what?
+                    f = getattr(f, 'im_self', f)
+                    self._log.error("Close callback failed for %s", f,
+                                    exc_info=sys.exc_info())
+            self.__onCloseCallbacks = None
+
+        self._debug_info = ()
+
+        if self._opened:
+            self.transaction_manager.unregisterSynch(self)
+
+        if primary:
+            for connection in self.connections.values():
+                if connection is not self:
+                    connection.close(False)
+
+            # Return the connection to the pool.
+            if self._opened is not None:
+                self._db._returnToPool(self)
+
+                # _returnToPool() set self._opened to None.
+                # However, we can't assert that here, because self may
+                # have been reused (by another thread) by the time we
+                # get back here.
+        else:
+            self._opened = None
+            am = self._db._activity_monitor
+            if am is not None:
+                am.closedConnection(self)
+
+    def db(self):
+        """Returns a handle to the database this connection belongs to."""
+        return self._db
+
+    def isReadOnly(self):
+        """Returns True if the storage for this connection is read only."""
+        if self._opened is None:
+            raise ConnectionStateError("The database connection is closed")
+        return self._storage.isReadOnly()
+
+    def invalidate(self, tid, oids):
+        """Notify the Connection that transaction 'tid' invalidated oids."""
+        self._inv_lock.acquire()
+        try:
+            if self._txn_time is None:
+                self._txn_time = tid
+            self._invalidated.update(oids)
+        finally:
+            self._inv_lock.release()
+
+    def invalidateCache(self):
+        self._inv_lock.acquire()
+        try:
+            self._invalidatedCache = True
+        finally:
+            self._inv_lock.release()
+
+
+    def root(self):
+        """Return the database root object."""
+        return self.get(z64)
+
+    def getVersion(self):
+        """Returns the version this connection is attached to."""
+        if self._storage is None:
+            raise ConnectionStateError("The database connection is closed")
+        return self._version
+
+    def get_connection(self, database_name):
+        """Return a Connection for the named database."""
+        connection = self.connections.get(database_name)
+        if connection is None:
+            new_con = self._db.databases[database_name].open(
+                transaction_manager=self.transaction_manager,
+                version=self._version,
+                )
+            self.connections.update(new_con.connections)
+            new_con.connections = self.connections
+            connection = new_con
+        return connection
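+
+    # Multi-database sketch (assumes a second database was registered
+    # under the name 'catalog' in a shared `databases` mapping):
+    #
+    #   catalog_conn = connection.get_connection('catalog')
+    #   catalog_conn.root()['index'] = index
+    #
+    # The secondary connection is opened with this connection's
+    # transaction manager, so one transaction.commit() spans both.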
+
+    def _implicitlyAdding(self, oid):
+        """Are we implicitly adding an object within the current transaction
+
+        This is used in a check to avoid implicitly adding an object
+        to a database in a multi-database situation.
+        See serialize.ObjectWriter.persistent_id.
+
+        """
+        return (self._creating.get(oid, 0)
+                or
+                ((self._savepoint_storage is not None)
+                 and
+                 self._savepoint_storage.creating.get(oid, 0)
+                 )
+                )
+
+    def sync(self):
+        """Manually update the view on the database."""
+        self.transaction_manager.abort()
+        self._storage_sync()
+
+    def getDebugInfo(self):
+        """Returns a tuple with different items for debugging the
+        connection.
+        """
+        return self._debug_info
+
+    def setDebugInfo(self, *args):
+        """Add the given items to the debug information of this connection."""
+        self._debug_info = self._debug_info + args
+
+    def getTransferCounts(self, clear=False):
+        """Returns the number of objects loaded and stored."""
+        res = self._load_count, self._store_count
+        if clear:
+            self._load_count = 0
+            self._store_count = 0
+        return res
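+
+    # For example (sketch):
+    #
+    #   loads, stores = connection.getTransferCounts(clear=True)
+    #
+    # reads both counters and resets them to zero in one call.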
+
+    # Connection methods
+    ##########################################################################
+
+    ##########################################################################
+    # Data manager (ISavepointDataManager) methods
+
+    def abort(self, transaction):
+        """Abort a transaction and forget all changes."""
+
+        # The order is important here.  We want to abort registered
+        # objects before we process the cache.  Otherwise, we may un-add
+        # objects added in savepoints.  If they've been modified since
+        # the savepoint, then they won't have _p_oid or _p_jar after
+        # they've been unadded. This will make the code in _abort
+        # confused.
+
+        self._abort()
+
+        if self._savepoint_storage is not None:
+            self._abort_savepoint()
+
+        self._tpc_cleanup()
+
+    def _abort(self):
+        """Abort a transaction and forget all changes."""
+
+        for obj in self._registered_objects:
+            oid = obj._p_oid
+            assert oid is not None
+            if oid in self._added:
+                del self._added[oid]
+                del obj._p_jar
+                del obj._p_oid
+            else:
+
+                # Note: If we invalidate a non-ghostifiable object
+                # (i.e. a persistent class), the object will
+                # immediately reread its state.  That means that the
+                # following call could result in a call to
+                # self.setstate, which, of course, must succeed.
+                # In general, it would be better if the read could be
+                # delayed until the start of the next transaction.  If
+                # we read at the end of a transaction and if the
+                # object was invalidated during this transaction, then
+                # we'll read non-current data, which we'll discard
+                # later in transaction finalization.  Unfortunately, we
+                # can only delay the read if this abort corresponds to
+                # a top-level-transaction abort.  We can't tell if
+                # this is a top-level-transaction abort, so we have to
+                # go ahead and invalidate now.  Fortunately, it's
+                # pretty unlikely that the object we are invalidating
+                # was invalidated by another thread, so the risk of a
+                # reread is pretty low.
+
+                self._cache.invalidate(oid)
+
+    def _tpc_cleanup(self):
+        """Performs cleanup operations to support tpc_finish and tpc_abort."""
+        self._conflicts.clear()
+        self._needs_to_join = True
+        self._registered_objects = []
+        self._creating.clear()
+
+    # Process pending invalidations.
+    def _flush_invalidations(self):
+        self._inv_lock.acquire()
+        try:
+            # Non-ghostifiable objects may need to read when they are
+            # invalidated, so we'll quickly just replace the
+            # invalidating dict with a new one.  We'll then process
+            # the invalidations after freeing the lock *and* after
+            # resetting the time.  This means that invalidations will
+            # happen after the start of the transactions.  They are
+            # subject to conflict errors and to reading old data.
+
+            # TODO: There is a potential problem lurking for persistent
+            # classes.  Suppose we have an invalidation of a persistent
+            # class and of an instance.  If the instance is
+            # invalidated first and if the invalidation logic uses
+            # data read from the class, then the invalidation could
+            # be performed with stale data.  Or, suppose that there
+            # are instances of the class that are freed as a result of
+            # invalidating some object.  Perhaps code in their __del__
+            # uses class data.  Really, the only way to properly fix
+            # this is to, in fact, make classes ghostifiable.  Then
+            # we'd have to reimplement attribute lookup to check the
+            # class state and, if necessary, activate the class.  It's
+            # much worse than that though, because we'd also need to
+            # deal with slots.  When a class is ghostified, we'd need
+            # to replace all of the slot operations with versions that
+            # reloaded the object when called. It's hard to say which
+            # is better or worse.  For now, the risk of using a class
+            # while objects are being invalidated seems small enough
+            # to be acceptable.
+
+            invalidated = dict.fromkeys(self._invalidated)
+            self._invalidated = set()
+            self._txn_time = None
+            if self._invalidatedCache:
+                self._invalidatedCache = False
+                invalidated = self._cache.cache_data.copy()
+        finally:
+            self._inv_lock.release()
+
+        self._cache.invalidate(invalidated)
+
+        # Now is a good time to collect some garbage.
+        self._cache.incrgc()
+
+    def tpc_begin(self, transaction):
+        """Begin commit of a transaction, starting the two-phase commit."""
+        self._modified = []
+
+        # _creating is a dict keyed by the oids of new objects; it is
+        # used to remove them from the cache if a transaction aborts.
+        self._creating.clear()
+        self._normal_storage.tpc_begin(transaction)
+
+    def commit(self, transaction):
+        """Commit changes to an object"""
+
+        if self._savepoint_storage is not None:
+
+            # We first checkpoint the current changes to the savepoint
+            self.savepoint()
+
+            # then commit all of the savepoint changes at once
+            self._commit_savepoint(transaction)
+
+            # No need to call _commit since savepoint did.
+
+        else:
+            self._commit(transaction)
+
+    def _commit(self, transaction):
+        """Commit changes to an object"""
+
+        if self._import:
+            # We are importing an export file. We always do this
+            # while making a savepoint so we can copy export data
+            # directly to our storage, typically a TmpStore.
+            self._importDuringCommit(transaction, *self._import)
+            self._import = None
+
+        # Just in case an object is added as a side-effect of storing
+        # a modified object.  If, for example, a __getstate__() method
+        # calls add(), the newly added objects will show up in
+        # _added_during_commit.  This sounds insane, but has actually
+        # happened.
+
+        self._added_during_commit = []
+
+        if self._invalidatedCache:
+            raise ConflictError()
+
+        for obj in self._registered_objects:
+            oid = obj._p_oid
+            assert oid
+            if oid in self._conflicts:
+                raise ReadConflictError(object=obj)
+
+            if obj._p_jar is not self:
+                raise InvalidObjectReference(obj, obj._p_jar)
+            elif oid in self._added:
+                assert obj._p_serial == z64
+            elif obj._p_changed:
+                if oid in self._invalidated:
+                    resolve = getattr(obj, "_p_resolveConflict", None)
+                    if resolve is None:
+                        raise ConflictError(object=obj)
+                self._modified.append(oid)
+            else:
+                # Nothing to do.  It's been said that it's legal, e.g., for
+                # an object to set _p_changed to false after it's been
+                # changed and registered.
+                continue
+
+            self._store_objects(ObjectWriter(obj), transaction)
+
+        for obj in self._added_during_commit:
+            self._store_objects(ObjectWriter(obj), transaction)
+        self._added_during_commit = None
+
+    def _store_objects(self, writer, transaction):
+        for obj in writer:
+            oid = obj._p_oid
+            serial = getattr(obj, "_p_serial", z64)
+
+            if ((serial == z64)
+                and
+                ((self._savepoint_storage is None)
+                 or (oid not in self._savepoint_storage.creating)
+                 or self._savepoint_storage.creating[oid]
+                 )
+                ):
+
+                # obj is a new object
+
+                # Because obj was added, it is now in _creating, so it
+                # can be removed from _added.  If oid wasn't in
+                # adding, then we are adding it implicitly.
+
+                implicitly_adding = self._added.pop(oid, None) is None
+
+                self._creating[oid] = implicitly_adding
+
+            else:
+                if (oid in self._invalidated
+                    and not hasattr(obj, '_p_resolveConflict')):
+                    raise ConflictError(object=obj)
+                self._modified.append(oid)
+            p = writer.serialize(obj)  # This calls __getstate__ of obj
+
+            if isinstance(obj, Blob):
+                if not IBlobStorage.providedBy(self._storage):
+                    raise Unsupported(
+                        "Storing Blobs in %s is not supported." % 
+                        repr(self._storage))
+                if obj.opened():
+                    raise ValueError("Can't commit with opened blobs.")
+                s = self._storage.storeBlob(oid, serial, p,
+                                            obj._uncommitted(),
+                                            self._version, transaction)
+                # We invalidate the object here to ensure that the
+                # next attribute access unghostifies it, which will
+                # cause its blob data to be reattached "cleanly".
+                obj._p_invalidate()
+            else:
+                s = self._storage.store(oid, serial, p, self._version,
+                                        transaction)
+            self._cache.update_object_size_estimation(oid, len(p))
+            obj._p_estimated_size = len(p)
+            self._store_count += 1
+            # Put the object in the cache before handling the
+            # response, just in case the response contains the
+            # serial number for a newly created object
+            try:
+                self._cache[oid] = obj
+            except:
+                # Dang, I bet it's wrapped:
+                # TODO:  Deprecate, then remove, this.
+                if hasattr(obj, 'aq_base'):
+                    self._cache[oid] = obj.aq_base
+                else:
+                    raise
+
+            self._handle_serial(s, oid)
+
+    def _handle_serial(self, store_return, oid=None, change=1):
+        """Handle the returns from store() and tpc_vote() calls."""
+
+        # These calls can return different types depending on whether
+        # ZEO is used.  ZEO uses asynchronous returns that may be
+        # returned in batches by the ClientStorage.  ZEO1 can also
+        # return an exception object and expect that the Connection
+        # will raise the exception.
+
+        # When conflict resolution occurs, the object state held by
+        # the connection does not match what is written to the
+        # database.  Invalidate the object here to guarantee that
+        # the new state is read the next time the object is used.
+
+        if not store_return:
+            return
+        if isinstance(store_return, str):
+            assert oid is not None
+            self._handle_one_serial(oid, store_return, change)
+        else:
+            for oid, serial in store_return:
+                self._handle_one_serial(oid, serial, change)
+
+    def _handle_one_serial(self, oid, serial, change):
+        if not isinstance(serial, str):
+            raise serial
+        obj = self._cache.get(oid, None)
+        if obj is None:
+            return
+        if serial == ResolvedSerial:
+            del obj._p_changed # transition from changed to ghost
+        else:
+            if change:
+                obj._p_changed = 0 # transition from changed to up-to-date
+            obj._p_serial = serial
+
+    def tpc_abort(self, transaction):
+        if self._import:
+            self._import = None
+
+        if self._savepoint_storage is not None:
+            self._abort_savepoint()
+
+        self._storage.tpc_abort(transaction)
+
+        # Note: If we invalidate a non-ghostifiable object (i.e. a
+        # persistent class), the object will immediately reread its
+        # state.  That means that the following call could result in a
+        # call to self.setstate, which, of course, must succeed.  In
+        # general, it would be better if the read could be delayed
+        # until the start of the next transaction.  If we read at the
+        # end of a transaction and if the object was invalidated
+        # during this transaction, then we'll read non-current data,
+        # which we'll discard later in transaction finalization.  We
+        # could, theoretically queue this invalidation by calling
+        # self.invalidate.  Unfortunately, attempts to make that
+        # change resulted in mysterious test failures.  It's pretty
+        # unlikely that the object we are invalidating was invalidated
+        # by another thread, so the risk of a reread is pretty low.
+        # It's really not worth the effort to pursue this.
+
+        self._cache.invalidate(self._modified)
+        self._invalidate_creating()
+        while self._added:
+            oid, obj = self._added.popitem()
+            del obj._p_oid
+            del obj._p_jar
+        self._tpc_cleanup()
+
+    def _invalidate_creating(self, creating=None):
+        """Disown any objects newly saved in an uncommitted transaction."""
+        if creating is None:
+            creating = self._creating
+            self._creating = {}
+
+        for oid in creating:
+            o = self._cache.get(oid)
+            if o is not None:
+                del self._cache[oid]
+                del o._p_jar
+                del o._p_oid
+
+    def tpc_vote(self, transaction):
+        """Verify that a data manager can commit the transaction."""
+        try:
+            vote = self._storage.tpc_vote
+        except AttributeError:
+            return
+        s = vote(transaction)
+        self._handle_serial(s)
+
+    def tpc_finish(self, transaction):
+        """Indicate confirmation that the transaction is done."""
+
+        def callback(tid):
+            d = dict.fromkeys(self._modified)
+            self._db.invalidate(tid, d, self)
+        # It's important that the storage call the passed function
+        # while it still has its lock.  We don't want another thread
+        # to be able to read any updated data until we've had a chance
+        # to send an invalidation message to all of the other
+        # connections!
+        self._storage.tpc_finish(transaction, callback)
+        self._tpc_cleanup()
+
+    def sortKey(self):
+        """Return a consistent sort key for this connection."""
+        return "%s:%s" % (self._storage.sortKey(), id(self))
+
+    # Data manager (ISavepointDataManager) methods
+    ##########################################################################
+
+    ##########################################################################
+    # Transaction-manager synchronization -- ISynchronizer
+
+    def beforeCompletion(self, txn):
+        # We don't do anything before a commit starts.
+        pass
+
+    # Call the underlying storage's sync() method (if any), and process
+    # pending invalidations regardless.  Of course this should only be
+    # called at transaction boundaries.
+    def _storage_sync(self, *ignored):
+        sync = getattr(self._storage, 'sync', 0)
+        if sync:
+            sync()
+        self._flush_invalidations()
+
+    afterCompletion = _storage_sync
+    newTransaction = _storage_sync
+
+    # Transaction-manager synchronization -- ISynchronizer
+    ##########################################################################
+
+    ##########################################################################
+    # persistent.interfaces.IPersistentDatamanager
+
+    def oldstate(self, obj, tid):
+        """Return copy of 'obj' that was written by transaction 'tid'."""
+        assert obj._p_jar is self
+        p = self._storage.loadSerial(obj._p_oid, tid)
+        return self._reader.getState(p)
+
+    def setstate(self, obj):
+        """Turns the ghost 'obj' into a real object by loading its state from
+        the database."""
+        oid = obj._p_oid
+
+        if self._opened is None:
+            msg = ("Shouldn't load state for %s "
+                   "when the connection is closed" % oid_repr(oid))
+            self._log.error(msg)
+            raise ConnectionStateError(msg)
+
+        try:
+            self._setstate(obj)
+        except ConflictError:
+            raise
+        except:
+            self._log.error("Couldn't load state for %s", oid_repr(oid),
+                            exc_info=sys.exc_info())
+            raise
+
+    def _setstate(self, obj):
+        # Helper for setstate(), which provides logging of failures.
+
+        # The control flow is complicated here to avoid loading an
+        # object revision that we are sure we aren't going to use.  As
+        # a result, invalidation tests occur before and after the
+        # load.  We can only be sure about invalidations after the
+        # load.
+
+        # If an object has been invalidated, there are several cases
+        # to consider:
+        # 1. Check _p_independent()
+        # 2. Try MVCC
+        # 3. Raise ConflictError.
+
+        # Does anything actually use _p_independent()?  It would simplify
+        # the code if we could drop support for it.  (BTrees.Length does
+        # use it, though.)
+
+        # There is a harmless data race with self._invalidated.  A
+        # dict update could go on in another thread, but we don't care
+        # because we have to check again after the load anyway.
+
+
+        if self._invalidatedCache:
+            raise ReadConflictError()
+
+        if (obj._p_oid in self._invalidated and
+                not myhasattr(obj, "_p_independent")):
+            # If the object has _p_independent(), we will handle it below.
+            self._load_before_or_conflict(obj)
+            return
+
+        p, serial = self._storage.load(obj._p_oid, self._version)
+        self._load_count += 1
+
+        self._inv_lock.acquire()
+        try:
+            invalid = obj._p_oid in self._invalidated
+        finally:
+            self._inv_lock.release()
+
+        if invalid:
+            if myhasattr(obj, "_p_independent"):
+                # This call will raise a ReadConflictError if something
+                # goes wrong
+                self._handle_independent(obj)
+            else:
+                self._load_before_or_conflict(obj)
+                return
+
+        self._reader.setGhostState(obj, p)
+        obj._p_serial = serial
+        self._cache.update_object_size_estimation(obj._p_oid, len(p))
+        obj._p_estimated_size = len(p)
+
+        # Blob support
+        if isinstance(obj, Blob):
+            obj._p_blob_uncommitted = None
+            obj._p_blob_committed = self._storage.loadBlob(obj._p_oid, serial)
+
+    def _load_before_or_conflict(self, obj):
+        """Load non-current state for obj or raise ReadConflictError."""
+        if not ((not self._version) and self._setstate_noncurrent(obj)):
+            self._register(obj)
+            self._conflicts[obj._p_oid] = True
+            raise ReadConflictError(object=obj)
+
+    def _setstate_noncurrent(self, obj):
+        """Set state using non-current data.
+
+        Return True if state was available, False if not.
+        """
+        try:
+            # Load data that was current before the commit at txn_time.
+            t = self._storage.loadBefore(obj._p_oid, self._txn_time)
+        except KeyError:
+            return False
+        if t is None:
+            return False
+        data, start, end = t
+        # The non-current transaction must have been written before
+        # txn_time.  It must be current at txn_time, but could have
+        # been modified at txn_time.
+
+        assert start < self._txn_time, (u64(start), u64(self._txn_time))
+        assert end is not None
+        assert self._txn_time <= end, (u64(self._txn_time), u64(end))
+        self._reader.setGhostState(obj, data)
+        obj._p_serial = start
+        return True
+
+    def _handle_independent(self, obj):
+        # Helper method for setstate() handles possibly independent objects
+        # Call _p_independent(), if it returns True, setstate() wins.
+        # Otherwise, raise a ConflictError.
+
+        if obj._p_independent():
+            self._inv_lock.acquire()
+            try:
+                try:
+                    self._invalidated.remove(obj._p_oid)
+                except KeyError:
+                    pass
+            finally:
+                self._inv_lock.release()
+        else:
+            self._conflicts[obj._p_oid] = 1
+            self._register(obj)
+            raise ReadConflictError(object=obj)
+
+    def register(self, obj):
+        """Register obj with the current transaction manager.
+
+        A subclass could override this method to customize the default
+        policy of one transaction manager for each thread.
+
+        obj must be an object loaded from this Connection.
+        """
+        assert obj._p_jar is self
+        if obj._p_oid is None:
+            # The actual complaint here is that an object without
+            # an oid is being registered.  I can't think of any way to
+            # achieve that without assignment to _p_jar.  If there is
+            # a way, this will be a very confusing exception.
+            raise ValueError("assigning to _p_jar is not supported")
+        elif obj._p_oid in self._added:
+            # It was registered before it was added to _added.
+            return
+        self._register(obj)
+
+    def _register(self, obj=None):
+
+        # The order here is important.  We need to join before
+        # registering the object, because joining may take a
+        # savepoint, and the savepoint should not reflect the change
+        # to the object.
+
+        if self._needs_to_join:
+            self.transaction_manager.get().join(self)
+            self._needs_to_join = False
+
+        if obj is not None:
+            self._registered_objects.append(obj)
+
+
+    # persistent.interfaces.IPersistentDatamanager
+    ##########################################################################
+
+    ##########################################################################
+    # PROTECTED stuff (used by e.g. ZODB.DB.DB)
+
+    def _cache_items(self):
+        # find all items on the lru list
+        items = self._cache.lru_items()
+        # find everything: some on the lru list, some not
+        everything = self._cache.cache_data
+        # remove those items that are on the lru list
+        for k, v in items:
+            del everything[k]
+        # return a list of [ghosts....not recently used.....recently used]
+        return everything.items() + items
+
+    def open(self, transaction_manager=None, delegate=True):
+        """Register odb, the DB that this Connection uses.
+
+        This method is called by the DB every time a Connection
+        is opened.  Any invalidations received while the Connection
+        was closed will be processed.
+
+        If the global module function resetCaches() was called, the
+        cache will be cleared.
+
+        Parameters:
+        odb: database that owns the Connection
+        transaction_manager: transaction manager to use.  None means
+            use the default transaction manager.
+        register for afterCompletion() calls.
+        """
+
+        self._opened = time()
+
+        if transaction_manager is None:
+            transaction_manager = transaction.manager
+
+        self.transaction_manager = transaction_manager
+
+        if self._reset_counter != global_reset_counter:
+            # New code is in place.  Start a new cache.
+            self._resetCache()
+        else:
+            self._flush_invalidations()
+
+        transaction_manager.registerSynch(self)
+
+        if self._cache is not None:
+            self._cache.incrgc() # This is a good time to do some GC
+
+        if delegate:
+            # delegate open to secondary connections
+            for connection in self.connections.values():
+                if connection is not self:
+                    connection.open(transaction_manager, False)
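+
+    # Application code does not normally call this directly; it is
+    # reached through the DB, roughly (sketch):
+    #
+    #   from ZODB import DB
+    #   db = DB(storage)
+    #   connection = db.open()   # DB calls connection.open() for us
+    #   root = connection.root()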
+
+    def _resetCache(self):
+        """Creates a new cache, discarding the old one.
+
+        See the docstring for the resetCaches() function.
+        """
+        self._reset_counter = global_reset_counter
+        self._invalidated.clear()
+        self._invalidatedCache = False
+        cache_size = self._cache.cache_size
+        cache_size_bytes = self._cache.cache_size_bytes
+        self._cache = cache = PickleCache(self, cache_size, cache_size_bytes)
+
+    ##########################################################################
+    # Python protocol
+
+    def __repr__(self):
+        if self._version:
+            ver = ' (in version %s)' % repr(self._version)
+        else:
+            ver = ''
+        return '<Connection at %08x%s>' % (positive_id(self), ver)
+
+    # Python protocol
+    ##########################################################################
+
+    ##########################################################################
+    # DEPRECATION candidates
+
+    __getitem__ = get
+
+    def modifiedInVersion(self, oid):
+        """Returns the version the object with the given oid was modified in.
+
+        If it wasn't modified in a version, the current version of this
+        connection is returned.
+        """
+        try:
+            return self._db.modifiedInVersion(oid)
+        except KeyError:
+            return self.getVersion()
+
+    def exchange(self, old, new):
+        # called by a ZClasses method that isn't executed by the test suite
+        oid = old._p_oid
+        new._p_oid = oid
+        new._p_jar = self
+        new._p_changed = 1
+        self._register(new)
+        self._cache[oid] = new
+
+    # DEPRECATION candidates
+    ##########################################################################
+
+    ##########################################################################
+    # DEPRECATED methods
+
+    # None at present.
+
+    # DEPRECATED methods
+    ##########################################################################
+
+    #####################################################################
+    # Savepoint support
+
+    def savepoint(self):
+        if self._savepoint_storage is None:
+            tmpstore = TmpStore(self._version, self._normal_storage)
+            self._savepoint_storage = tmpstore
+            self._storage = self._savepoint_storage
+
+        self._creating.clear()
+        self._commit(None)
+        self._storage.creating.update(self._creating)
+        self._creating.clear()
+        self._registered_objects = []
+
+        state = self._storage.position, self._storage.index.copy()
+        result = Savepoint(self, state)
+        # While the interface doesn't guarantee this, savepoints are
+        # sometimes used just to "break up" very long transactions, and as
+        # a pragmatic matter this is a good time to reduce the cache
+        # memory burden.
+        self.cacheGC()
+        return result
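+
+    # Typical use goes through the transaction machinery rather than
+    # calling this method directly (risky_update is hypothetical):
+    #
+    #   sp = transaction.savepoint()
+    #   try:
+    #       risky_update(root)
+    #   except ValueError:
+    #       sp.rollback()        # restores the pre-savepoint state
+    #   transaction.commit()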
+
+    def _rollback(self, state):
+        self._abort()
+        self._registered_objects = []
+        src = self._storage
+        self._cache.invalidate(src.index)
+        src.reset(*state)
+
+    def _commit_savepoint(self, transaction):
+        """Commit all changes made in savepoints and begin 2-phase commit
+        """
+        src = self._savepoint_storage
+        self._storage = self._normal_storage
+        self._savepoint_storage = None
+
+        self._log.debug("Committing savepoints of size %s", src.getSize())
+        oids = src.index.keys()
+
+        # Copy invalidating and creating info from temporary storage:
+        self._modified.extend(oids)
+        self._creating.update(src.creating)
+
+        for oid in oids:
+            data, serial = src.load(oid, src)
+            obj = self._cache.get(oid, None)
+            if obj is not None:
+                self._cache.update_object_size_estimation(
+                    obj._p_oid, len(data))
+                obj._p_estimated_size = len(data)
+            if isinstance(self._reader.getGhost(data), Blob):
+                blobfilename = src.loadBlob(oid, serial)
+                s = self._storage.storeBlob(oid, serial, data, blobfilename,
+                                            self._version, transaction)
+                # We invalidate the object here to ensure that the
+                # next attribute access unghostifies it, which will
+                # cause its blob data to be reattached "cleanly".
+                self.invalidate(s, {oid:True})
+            else:
+                s = self._storage.store(oid, serial, data,
+                                        self._version, transaction)
+
+            self._handle_serial(s, oid, change=False)
+        src.close()
+
+    def _abort_savepoint(self):
+        """Discard all savepoint data."""
+        src = self._savepoint_storage
+        self._storage = self._normal_storage
+        self._savepoint_storage = None
+
+        # Note: If we invalidate a non-ghostifiable object (i.e. a
+        # persistent class), the object will immediately reread its
+        # state.  That means that the following call could result in a
+        # call to self.setstate, which, of course, must succeed.  In
+        # general, it would be better if the read could be delayed
+        # until the start of the next transaction.  If we read at the
+        # end of a transaction and if the object was invalidated
+        # during this transaction, then we'll read non-current data,
+        # which we'll discard later in transaction finalization.  We
+        # could, theoretically queue this invalidation by calling
+        # self.invalidate.  Unfortunately, attempts to make that
+        # change resulted in mysterious test failures.  It's pretty
+        # unlikely that the object we are invalidating was invalidated
+        # by another thread, so the risk of a reread is pretty low.
+        # It's really not worth the effort to pursue this.
+
+        self._cache.invalidate(src.index)
+        self._invalidate_creating(src.creating)
+        src.close()
+
+    # Savepoint support
+    #####################################################################
+
+class Savepoint:
+
+    implements(IDataManagerSavepoint)
+
+    def __init__(self, datamanager, state):
+        self.datamanager = datamanager
+        self.state = state
+
+    def rollback(self):
+        self.datamanager._rollback(self.state)
+
+class TmpStore:
+    """A storage-like thing to support savepoints."""
+
+    implements(IBlobStorage)
+
+    def __init__(self, base_version, storage):
+        self._storage = storage
+        for method in (
+            'getName', 'new_oid', 'getSize', 'sortKey', 'loadBefore',
+            ):
+            setattr(self, method, getattr(storage, method))
+
+        try:
+            supportsVersions = storage.supportsVersions
+        except AttributeError:
+            pass
+        else:
+            if supportsVersions():
+                self.modifiedInVersion = storage.modifiedInVersion
+                self.versionEmpty = storage.versionEmpty
+
+        self._base_version = base_version
+        self._file = tempfile.TemporaryFile()
+        # position: current file position
+        self.position = 0L
+        # index: map oid to pos of last committed version
+        self.index = {}
+        self.creating = {}
+
+    def __len__(self):
+        return len(self.index)
+
+    def close(self):
+        self._file.close()
+
+    def load(self, oid, version):
+        pos = self.index.get(oid)
+        if pos is None:
+            return self._storage.load(oid, self._base_version)
+        self._file.seek(pos)
+        h = self._file.read(8)
+        oidlen = u64(h)
+        read_oid = self._file.read(oidlen)
+        if read_oid != oid:
+            raise POSException.StorageSystemError('Bad temporary storage')
+        h = self._file.read(16)
+        size = u64(h[8:])
+        serial = h[:8]
+        return self._file.read(size), serial
+
+    def store(self, oid, serial, data, version, transaction):
+        # This has the same signature as a regular storage store() so
+        # that the normal commit logic can be reused for savepoints.
+        assert version == self._base_version
+        self._file.seek(self.position)
+        l = len(data)
+        if serial is None:
+            serial = z64
+        header = p64(len(oid)) + oid + serial + p64(l)
+        self._file.write(header)
+        self._file.write(data)
+        self.index[oid] = self.position
+        self.position += l + len(header)
+        return serial
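+
+    # Record layout written by store() and read back by load():
+    #
+    #   8 bytes          p64(len(oid))
+    #   len(oid) bytes   oid
+    #   8 bytes          serial
+    #   8 bytes          p64(len(data))
+    #   len(data) bytes  data (the object's pickle)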
+
+    def storeBlob(self, oid, serial, data, blobfilename, version,
+                  transaction):
+        serial = self.store(oid, serial, data, version, transaction)
+
+        targetpath = self._getBlobPath()
+        if not os.path.exists(targetpath):
+            os.makedirs(targetpath, 0700)
+
+        targetname = self._getCleanFilename(oid, serial)
+        rename_or_copy_blob(blobfilename, targetname, chmod=False)
+
+    def loadBlob(self, oid, serial):
+        """Return the filename where the blob file can be found.
+        """
+        if not IBlobStorage.providedBy(self._storage):
+            raise Unsupported(
+                "Blobs are not supported by the underlying storage %r." %
+                self._storage)
+        filename = self._getCleanFilename(oid, serial)
+        if not os.path.exists(filename):
+            return self._storage.loadBlob(oid, serial)
+        return filename
+
+    def _getBlobPath(self):
+        return os.path.join(self.temporaryDirectory(), 'savepoints')
+
+    def _getCleanFilename(self, oid, tid):
+        return os.path.join(self._getBlobPath(),
+                            "%s-%s%s" % (utils.oid_repr(oid), utils.tid_repr(tid), SAVEPOINT_SUFFIX,)
+                            )
+
+    def temporaryDirectory(self):
+        return self._storage.temporaryDirectory()
+
+    def reset(self, position, index):
+        self._file.truncate(position)
+        self.position = position
+        # Caution:  We're typically called as part of a savepoint rollback.
+        # Other machinery remembers the index to restore, and passes it to
+        # us.  If we simply bind self.index to `index`, then if the caller
+        # didn't pass a copy of the index, the caller's index will mutate
+        # when self.index mutates.  This can be a disaster if the caller is a
+        # savepoint to which the user rolls back again later (the savepoint
+        # loses the original index it passed).  Therefore, to be safe, we make
+        # a copy of the index here.  An alternative would be to ensure that
+        # all callers pass copies.  As is, our callers do not make copies.
+        self.index = index.copy()

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/ZODB/DB.py	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,856 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Database objects
-
-$Id$"""
-
-import warnings
-
-import cPickle, cStringIO, sys
-import threading
-from time import time, ctime
-import logging
-
-from ZODB.broken import find_global
-from ZODB.utils import z64
-from ZODB.Connection import Connection
-import ZODB.serialize
-from ZODB.utils import WeakSet
-
-from zope.interface import implements
-from ZODB.interfaces import IDatabase
-
-import transaction
-
-
-logger = logging.getLogger('ZODB.DB')
-
-class _ConnectionPool(object):
-    """Manage a pool of connections.
-
-    CAUTION:  Methods should be called under the protection of a lock.
-    This class does no locking of its own.
-
-    There's no limit on the number of connections this can keep track of,
-    but a warning is logged if there are more than pool_size active
-    connections, and a critical problem if more than twice pool_size.
-
-    New connections are registered via push().  This will log a message if
-    "too many" connections are active.
-
-    When a connection is explicitly closed, tell the pool via repush().
-    That adds the connection to a stack of connections available for
-    reuse, and throws away the oldest stack entries if the stack is too large.
-    pop() pops this stack.
-
-    When a connection is obtained via pop(), the pool holds only a weak
-    reference to it thereafter.  It's not necessary to inform the pool
-    if the connection goes away.  A connection handed out by pop() counts
-    against pool_size only so long as it exists, and provided it isn't
-    repush()'ed.  A weak reference is retained so that DB methods like
-    connectionDebugInfo() can still gather statistics.
-    """
-
-    def __init__(self, pool_size):
-        # The largest # of connections we expect to see alive simultaneously.
-        self.pool_size = pool_size
-
-        # A weak set of all connections we've seen.  A connection vanishes
-        # from this set if pop() hands it out, it's not reregistered via
-        # repush(), and it becomes unreachable.
-        self.all = WeakSet()
-
-        # A stack of connections available to hand out.  This is a subset
-        # of self.all.  push() and repush() add to this, and may remove
-        # the oldest available connections if the pool is too large.
-        # pop() pops this stack.  There are never more than pool_size entries
-        # in this stack.
-        # In Python 2.4, a collections.deque would make more sense than
-        # a list (we push only "on the right", but may pop from both ends).
-        self.available = []
-
-    def set_pool_size(self, pool_size):
-        """Change our belief about the expected maximum # of live connections.
-
-        If the pool_size is smaller than the current value, this may discard
-        the oldest available connections.
-        """
-        self.pool_size = pool_size
-        self._reduce_size()
-
-    def push(self, c):
-        """Register a new available connection.
-
-        We must not know about c already. c will be pushed onto the available
-        stack even if we're over the pool size limit.
-        """
-        assert c not in self.all
-        assert c not in self.available
-        self._reduce_size(strictly_less=True)
-        self.all.add(c)
-        self.available.append(c)
-        n = len(self.all)
-        limit = self.pool_size
-        if n > limit:
-            reporter = logger.warn
-            if n > 2 * limit:
-                reporter = logger.critical
-            reporter("DB.open() has %s open connections with a pool_size "
-                     "of %s", n, limit)
-
-    def repush(self, c):
-        """Reregister an available connection formerly obtained via pop().
-
-        This pushes it on the stack of available connections, and may discard
-        older available connections.
-        """
-        assert c in self.all
-        assert c not in self.available
-        self._reduce_size(strictly_less=True)
-        self.available.append(c)
-
-    def _reduce_size(self, strictly_less=False):
-        """Throw away the oldest available connections until we're under our
-        target size (strictly_less=False, the default) or no more than that
-        (strictly_less=True).
-        """
-        target = self.pool_size
-        if strictly_less:
-            target -= 1
-        while len(self.available) > target:
-            c = self.available.pop(0)
-            self.all.remove(c)
-            # While application code may still hold a reference to `c`,
-            # there's little useful that can be done with this Connection
-            # anymore.  Its cache may be holding on to limited resources,
-            # and we replace the cache with an empty one now so that we
-            # don't have to wait for gc to reclaim it.  Note that it's not
-            # possible for DB.open() to return `c` again:  `c` can never
-            # be in an open state again.
-            # TODO:  Perhaps it would be better to break the reference
-            # cycles between `c` and `c._cache`, so that refcounting reclaims
-            # both right now.  But if user code _does_ have a strong
-            # reference to `c` now, breaking the cycle would not reclaim `c`
-            # now, and `c` would be left in a user-visible crazy state.
-            c._resetCache()
-
-    def pop(self):
-        """Pop an available connection and return it.
-
-        Return None if none are available - in this case, the caller should
-        create a new connection, register it via push(), and call pop() again.
-        The caller is responsible for serializing this sequence.
-        """
-        result = None
-        if self.available:
-            result = self.available.pop()
-            # Leave it in self.all, so we can still get at it for statistics
-            # while it's alive.
-            assert result in self.all
-        return result
-
-    def map(self, f, open_connections=True):
-        """For every live connection c, invoke f(c).
-
-        If `open_connections` is false then only call f(c) on closed
-        connections.
-
-        """
-        if open_connections:
-            self.all.map(f)
-        else:
-            map(f, self.available)
-
-class DB(object):
-    """The Object Database
-    -------------------
-
-    The DB class coordinates the activities of multiple database
-    Connection instances.  Most of the work is done by the
-    Connections created via the open method.
-
-    The DB instance manages a pool of connections.  If a connection is
-    closed, it is returned to the pool and its object cache is
-    preserved.  A subsequent call to open() will reuse the connection.
-    There is no hard limit on the pool size.  If more than `pool_size`
-    connections are opened, a warning is logged, and if more than twice
-    that many, a critical problem is logged.
-
-    The class variable 'klass' is used by open() to create database
-    connections.  It is set to Connection, but a subclass could override
-    it to provide a different connection implementation.
-
-    The database provides a few methods intended for application code
-    -- open, close, undo, and pack -- and a large collection of
-    methods for inspecting the database and its connections' caches.
-
-    :Cvariables:
-      - `klass`: Class used by L{open} to create database connections
-
-    :Groups:
-      - `User Methods`: __init__, open, close, undo, pack, classFactory
-      - `Inspection Methods`: getName, getSize, objectCount,
-        getActivityMonitor, setActivityMonitor
-      - `Connection Pool Methods`: getPoolSize, getVersionPoolSize,
-        removeVersionPool, setPoolSize, setVersionPoolSize
-      - `Transaction Methods`: invalidate
-      - `Other Methods`: lastTransaction, connectionDebugInfo
-      - `Version Methods`: modifiedInVersion, abortVersion, commitVersion,
-        versionEmpty
-      - `Cache Inspection Methods`: cacheDetail, cacheExtremeDetail,
-        cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize,
-        cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize,
-        setVersionCacheSize
-    """
-    implements(IDatabase)
-
-    klass = Connection  # Class to use for connections
-    _activity_monitor = None
-
-    def __init__(self, storage,
-                 pool_size=7,
-                 cache_size=400,
-                 version_pool_size=3,
-                 version_cache_size=100,
-                 database_name='unnamed',
-                 databases=None,
-                 ):
-        """Create an object database.
-
-        :Parameters:
-          - `storage`: the storage used by the database, e.g. FileStorage
-          - `pool_size`: expected maximum number of open connections
-          - `cache_size`: target size of Connection object cache
-          - `version_pool_size`: expected maximum number of connections (per
-            version)
-          - `version_cache_size`: target size of Connection object cache for
-            version connections
-        """
-        # Allocate lock.
-        x = threading.RLock()
-        self._a = x.acquire
-        self._r = x.release
-
-        # Setup connection pools and cache info
-        # _pools maps a version string to a _ConnectionPool object.
-        self._pools = {}
-        self._pool_size = pool_size
-        self._cache_size = cache_size
-        self._version_pool_size = version_pool_size
-        self._version_cache_size = version_cache_size
-
-        # Setup storage
-        self._storage=storage
-        self.references = ZODB.serialize.referencesf
-        try:
-            storage.registerDB(self)
-        except TypeError:
-            storage.registerDB(self, None) # Backward compat
-
-        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
-            warnings.warn(
-                "Storage doesn't have a tpc_vote and this violates "
-                "the storage API. Violently monkeypatching in a do-nothing "
-                "tpc_vote.",
-                DeprecationWarning, 2)
-            storage.tpc_vote = lambda *args: None
-
-        try:
-            storage.load(z64, '')
-        except KeyError:
-            # Create the database's root in the storage if it doesn't exist
-            from persistent.mapping import PersistentMapping
-            root = PersistentMapping()
-            # Manually create a pickle for the root to put in the storage.
-            # The pickle must be in the special ZODB format.
-            file = cStringIO.StringIO()
-            p = cPickle.Pickler(file, 1)
-            p.dump((root.__class__, None))
-            p.dump(root.__getstate__())
-            t = transaction.Transaction()
-            t.description = 'initial database creation'
-            storage.tpc_begin(t)
-            storage.store(z64, None, file.getvalue(), '', t)
-            storage.tpc_vote(t)
-            storage.tpc_finish(t)
-
-        # Multi-database setup.
-        if databases is None:
-            databases = {}
-        self.databases = databases
-        self.database_name = database_name
-        if database_name in databases:
-            raise ValueError("database_name %r already in databases" %
-                             database_name)
-        databases[database_name] = self
-
-        self._setupUndoMethods()
-        self._setupVersionMethods()
-        self.history = storage.history
-
-    def _setupUndoMethods(self):
-        storage = self._storage
-        try:
-            self.supportsUndo = storage.supportsUndo
-        except AttributeError:
-            self.supportsUndo = lambda : False
-
-        if self.supportsUndo():
-            self.undoLog = storage.undoLog
-            if hasattr(storage, 'undoInfo'):
-                self.undoInfo = storage.undoInfo
-        else:
-            self.undoLog = self.undoInfo = lambda *a,**k: ()
-            def undo(*a, **k):
-                raise NotImplementedError
-            self.undo = undo
-
-    def _setupVersionMethods(self):
-        storage = self._storage
-        try:
-            self.supportsVersions = storage.supportsVersions
-        except AttributeError:
-            self.supportsVersions = lambda : False
-
-        if self.supportsVersions():
-            self.versionEmpty = storage.versionEmpty
-            self.versions = storage.versions
-            self.modifiedInVersion = storage.modifiedInVersion
-        else:
-            self.versionEmpty = lambda version: True
-            self.versions = lambda max=None: ()
-            self.modifiedInVersion = lambda oid: ''
-            def commitVersion(*a, **k):
-                raise NotImplementedError
-            self.commitVersion = self.abortVersion = commitVersion
-
-    # This is called by Connection.close().
-    def _returnToPool(self, connection):
-        """Return a connection to the pool.
-
-        connection._db must be self on entry.
-        """
-
-        self._a()
-        try:
-            assert connection._db is self
-            connection._opened = None
-
-            am = self._activity_monitor
-            if am is not None:
-                am.closedConnection(connection)
-
-            version = connection._version
-            try:
-                pool = self._pools[version]
-            except KeyError:
-                # No such version. We must have deleted the pool.
-                # Just let the connection go.
-
-                # We need to break circular refs to make it really go.
-                # TODO:  Figure out exactly which objects are involved in the
-                # cycle.
-                connection.__dict__.clear()
-                return
-            pool.repush(connection)
-
-        finally:
-            self._r()
-
-    def _connectionMap(self, f, open_connections=True):
-        """Call f(c) for all connections c in all pools in all versions.
-
-        If `open_connections` is false then f(c) is only called on closed
-        connections.
-
-        """
-        self._a()
-        try:
-            for pool in self._pools.values():
-                pool.map(f, open_connections=open_connections)
-        finally:
-            self._r()
-
-    def abortVersion(self, version, txn=None):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        if txn is None:
-            txn = transaction.get()
-        txn.register(AbortVersion(self, version))
-
-    def cacheDetail(self):
-        """Return information on objects in the various caches
-
-        Organized by class.
-        """
-
-        detail = {}
-        def f(con, detail=detail):
-            for oid, ob in con._cache.items():
-                module = getattr(ob.__class__, '__module__', '')
-                module = module and '%s.' % module or ''
-                c = "%s%s" % (module, ob.__class__.__name__)
-                if c in detail:
-                    detail[c] += 1
-                else:
-                    detail[c] = 1
-
-        self._connectionMap(f)
-        detail = detail.items()
-        detail.sort()
-        return detail
-
-    def cacheExtremeDetail(self):
-        detail = []
-        conn_no = [0]  # A mutable reference to a counter
-        def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no):
-            conn_no[0] += 1
-            cn = conn_no[0]
-            for oid, ob in con._cache_items():
-                id = ''
-                if hasattr(ob, '__dict__'):
-                    d = ob.__dict__
-                    if d.has_key('id'):
-                        id = d['id']
-                    elif d.has_key('__name__'):
-                        id = d['__name__']
-
-                module = getattr(ob.__class__, '__module__', '')
-                module = module and ('%s.' % module) or ''
-
-                # What refcount ('rc') should we return?  The intent is
-                # that we return the true Python refcount, but as if the
-                # cache didn't exist.  This routine adds 3 to the true
-                # refcount:  1 for binding to name 'ob', another because
-                # ob lives in the con._cache_items() list we're iterating
-                # over, and calling sys.getrefcount(ob) boosts ob's
-                # count by 1 too.  So the true refcount is 3 less than
-                # sys.getrefcount(ob) returns.  But, in addition to that,
-                # the cache holds an extra reference on non-ghost objects,
-                # and we also want to pretend that doesn't exist.
-                detail.append({
-                    'conn_no': cn,
-                    'oid': oid,
-                    'id': id,
-                    'klass': "%s%s" % (module, ob.__class__.__name__),
-                    'rc': rc(ob) - 3 - (ob._p_changed is not None),
-                    'state': ob._p_changed,
-                    #'references': con.references(oid),
-                    })
-
-        self._connectionMap(f)
-        return detail
-
-    def cacheFullSweep(self):
-        self._connectionMap(lambda c: c._cache.full_sweep())
-
-    def cacheLastGCTime(self):
-        m = [0]
-        def f(con, m=m):
-            t = con._cache.cache_last_gc_time
-            if t > m[0]:
-                m[0] = t
-
-        self._connectionMap(f)
-        return m[0]
-
-    def cacheMinimize(self):
-        self._connectionMap(lambda c: c._cache.minimize())
-
-    def cacheSize(self):
-        m = [0]
-        def f(con, m=m):
-            m[0] += con._cache.cache_non_ghost_count
-
-        self._connectionMap(f)
-        return m[0]
-
-    def cacheDetailSize(self):
-        m = []
-        def f(con, m=m):
-            m.append({'connection': repr(con),
-                      'ngsize': con._cache.cache_non_ghost_count,
-                      'size': len(con._cache)})
-        self._connectionMap(f)
-        m.sort()
-        return m
-
-    def close(self):
-        """Close the database and its underlying storage.
-
-        It is important to close the database, because the storage may
-        flush in-memory data structures to disk when it is closed.
-        Leaving the storage open when the process exits can cause the
-        next open to be slow.
-
-        What effect does closing the database have on existing
-        connections?  Technically, they remain open, but their storage
-        is closed, so they stop behaving usefully.  Perhaps close()
-        should also close all the Connections.
-        """
-        self._storage.close()
-
-    def commitVersion(self, source, destination='', txn=None):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        if txn is None:
-            txn = transaction.get()
-        txn.register(CommitVersion(self, source, destination))
-
-    def getCacheSize(self):
-        return self._cache_size
-
-    def lastTransaction(self):
-        return self._storage.lastTransaction()
-
-    def getName(self):
-        return self._storage.getName()
-
-    def getPoolSize(self):
-        return self._pool_size
-
-    def getSize(self):
-        return self._storage.getSize()
-
-    def getVersionCacheSize(self):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        return self._version_cache_size
-
-    def getVersionPoolSize(self):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        return self._version_pool_size
-
-    def invalidate(self, tid, oids, connection=None, version=''):
-        """Invalidate references to a given oid.
-
-        This is used to indicate that one of the connections has committed a
-        change to the object.  The connection committing the change should be
-        passed in to prevent useless (but harmless) messages to the
-        connection.
-        """
-        if connection is not None:
-            version = connection._version
-
-        # Notify connections.
-        def inval(c):
-            if (c is not connection and
-                  (not version or c._version == version)):
-                c.invalidate(tid, oids)
-        self._connectionMap(inval)
-
-    def invalidateCache(self):
-        """Invalidate each of the connection caches
-        """
-        self._connectionMap(lambda c: c.invalidateCache())
-
-    def objectCount(self):
-        return len(self._storage)
-
-    def open(self, version='', transaction_manager=None):
-        """Return a database Connection for use by application code.
-
-        The optional `version` argument can be used to specify that a
-        version connection is desired.
-
-        Note that the connection pool is managed as a stack, to
-        increase the likelihood that the connection's cache will
-        include useful objects.
-
-        :Parameters:
-          - `version`: the "version" that all changes will be made
-             in, defaults to no version.
-          - `transaction_manager`: transaction manager to use.  None means
-             use the default transaction manager.
-        """
-
-        if version:
-            if not self.supportsVersions():
-                raise ValueError(
-                    "Versions are not supported by this database.")
-            warnings.warn(
-                "Versions are deprecated and will become unsupported "
-                "in ZODB 3.9",
-                DeprecationWarning, 2)            
-
-        self._a()
-        try:
-            # pool <- the _ConnectionPool for this version
-            pool = self._pools.get(version)
-            if pool is None:
-                if version:
-                    size = self._version_pool_size
-                else:
-                    size = self._pool_size
-                self._pools[version] = pool = _ConnectionPool(size)
-            assert pool is not None
-
-            # result <- a connection
-            result = pool.pop()
-            if result is None:
-                if version:
-                    size = self._version_cache_size
-                else:
-                    size = self._cache_size
-                c = self.klass(self, version, size)
-                pool.push(c)
-                result = pool.pop()
-            assert result is not None
-
-            # Tell the connection it belongs to self.
-            result.open(transaction_manager)
-
-            # A good time to do some cache cleanup.
-            self._connectionMap(lambda c: c.cacheGC(), open_connections=False)
-
-            return result
-
-        finally:
-            self._r()
-
-    def removeVersionPool(self, version):
-        try:
-            del self._pools[version]
-        except KeyError:
-            pass
-
-    def connectionDebugInfo(self):
-        result = []
-        t = time()
-
-        def get_info(c):
-            # `result`, `t` and `version` are lexically inherited.
-            o = c._opened
-            d = c.getDebugInfo()
-            if d:
-                if len(d) == 1:
-                    d = d[0]
-            else:
-                d = ''
-            d = "%s (%s)" % (d, len(c._cache))
-
-            result.append({
-                'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
-                'info': d,
-                'version': version,
-                })
-
-        for version, pool in self._pools.items():
-            pool.map(get_info)
-        return result
-
-    def getActivityMonitor(self):
-        return self._activity_monitor
-
-    def pack(self, t=None, days=0):
-        """Pack the storage, deleting unused object revisions.
-
-        A pack is always performed relative to a particular time, by
-        default the current time.  All object revisions that are not
-        reachable as of the pack time are deleted from the storage.
-
-        The cost of this operation varies by storage, but it is
-        usually an expensive operation.
-
-        There are two optional arguments that can be used to set the
-        pack time: t, pack time in seconds since the epoch, and days,
-        the number of days to subtract from t or from the current
-        time if t is not specified.
-        """
-        if t is None:
-            t = time()
-        t -= days * 86400
-        try:
-            self._storage.pack(t, self.references)
-        except:
-            logger.error("packing", exc_info=True)
-            raise
-
-    def setActivityMonitor(self, am):
-        self._activity_monitor = am
-
-    def classFactory(self, connection, modulename, globalname):
-        # Zope will rebind this method to arbitrary user code at runtime.
-        return find_global(modulename, globalname)
-
-    def setCacheSize(self, size):
-        self._a()
-        try:
-            self._cache_size = size
-            pool = self._pools.get('')
-            if pool is not None:
-                def setsize(c):
-                    c._cache.cache_size = size
-                pool.map(setsize)
-        finally:
-            self._r()
-
-    def setVersionCacheSize(self, size):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        self._a()
-        try:
-            self._version_cache_size = size
-            def setsize(c):
-                c._cache.cache_size = size
-            for version, pool in self._pools.items():
-                if version:
-                    pool.map(setsize)
-        finally:
-            self._r()
-
-    def setPoolSize(self, size):
-        self._pool_size = size
-        self._reset_pool_sizes(size, for_versions=False)
-
-    def setVersionPoolSize(self, size):
-        warnings.warn(
-            "Versions are deprecated and will become unsupported "
-            "in ZODB 3.9",
-            DeprecationWarning, 2)            
-        self._version_pool_size = size
-        self._reset_pool_sizes(size, for_versions=True)
-
-    def _reset_pool_sizes(self, size, for_versions=False):
-        self._a()
-        try:
-            for version, pool in self._pools.items():
-                if (version != '') == for_versions:
-                    pool.set_pool_size(size)
-        finally:
-            self._r()
-
-    def undo(self, id, txn=None):
-        """Undo a transaction identified by id.
-
-        A transaction can be undone if all of the objects involved in
-        the transaction were not modified subsequently, if any
-        modifications can be resolved by conflict resolution, or if
-        subsequent changes resulted in the same object state.
-
-        The value of id should be generated by calling undoLog()
-        or undoInfo().  The value of id is not the same as a
-        transaction id used by other methods; it is unique to undo().
-
-        :Parameters:
-          - `id`: a storage-specific transaction identifier
-          - `txn`: transaction context to use for undo().
-            By default, uses the current transaction.
-        """
-        if txn is None:
-            txn = transaction.get()
-        txn.register(TransactionalUndo(self, id))
-
-
-resource_counter_lock = threading.Lock()
-resource_counter = 0
-
-class ResourceManager(object):
-    """Transaction participation for a version or undo resource."""
-
-    # XXX This implementation is broken.  Subclasses invalidate oids
-    # in their commit calls. Invalidations should not be sent until
-    # tpc_finish is called.  In fact, invalidations should be sent to
-    # the db *while* tpc_finish is being called on the storage.
-
-    def __init__(self, db):
-        self._db = db
-        # Delegate the actual 2PC methods to the storage
-        self.tpc_vote = self._db._storage.tpc_vote
-        self.tpc_finish = self._db._storage.tpc_finish
-        self.tpc_abort = self._db._storage.tpc_abort
-
-        # Get a number from a simple thread-safe counter, then
-        # increment it, for the purpose of sorting ResourceManagers by
-        # creation order.  This ensures that multiple ResourceManagers
-        # within a transaction commit in a predictable sequence.
-        resource_counter_lock.acquire()
-        try:
-            global resource_counter
-            self._count = resource_counter
-            resource_counter += 1
-        finally:
-            resource_counter_lock.release()
-
-    def sortKey(self):
-        return "%s:%016x" % (self._db._storage.sortKey(), self._count)
-
-    def tpc_begin(self, txn, sub=False):
-        if sub:
-            raise ValueError("doesn't support sub-transactions")
-        self._db._storage.tpc_begin(txn)
-
-    # The object registers itself with the txn manager, so the ob
-    # argument to the methods below is self.
-
-    def abort(self, obj, txn):
-        raise NotImplementedError
-
-    def commit(self, obj, txn):
-        raise NotImplementedError
-
-class CommitVersion(ResourceManager):
-
-    def __init__(self, db, version, dest=''):
-        super(CommitVersion, self).__init__(db)
-        self._version = version
-        self._dest = dest
-
-    def commit(self, ob, t):
-        # XXX see XXX in ResourceManager
-        dest = self._dest
-        tid, oids = self._db._storage.commitVersion(self._version,
-                                                    self._dest,
-                                                    t)
-        oids = dict.fromkeys(oids, 1)
-        self._db.invalidate(tid, oids, version=self._dest)
-        if self._dest:
-            # the code above just invalidated the dest version.
-            # now we need to invalidate the source!
-            self._db.invalidate(tid, oids, version=self._version)
-
-class AbortVersion(ResourceManager):
-
-    def __init__(self, db, version):
-        super(AbortVersion, self).__init__(db)
-        self._version = version
-
-    def commit(self, ob, t):
-        # XXX see XXX in ResourceManager
-        tid, oids = self._db._storage.abortVersion(self._version, t)
-        self._db.invalidate(tid,
-                            dict.fromkeys(oids, 1),
-                            version=self._version)
-
-class TransactionalUndo(ResourceManager):
-
-    def __init__(self, db, tid):
-        super(TransactionalUndo, self).__init__(db)
-        self._tid = tid
-
-    def commit(self, ob, t):
-        # XXX see XXX in ResourceManager
-        tid, oids = self._db._storage.undo(self._tid, t)
-        self._db.invalidate(tid, dict.fromkeys(oids, 1))

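A note for readers of the pool code deleted above (and re-added, plus the
cache_size_bytes changes, in the copy below): the push()/repush()/pop()
docstrings describe a checkout protocol that DB.open() and
Connection.close() drive under the DB lock. A minimal sketch of that
protocol, where `make_connection` is a hypothetical stand-in for
`self.klass(...)` and a plain RLock stands in for DB._a/DB._r:

    import threading

    _lock = threading.RLock()

    def checkout(pool, make_connection):
        # pop() may return None; per its docstring the caller then creates
        # a connection, registers it via push(), and pops again, with the
        # whole sequence serialized by the caller.
        with _lock:
            c = pool.pop()
            if c is None:
                c = make_connection()
                pool.push(c)        # registers c; logs if over pool_size
                c = pool.pop()      # now guaranteed to return a connection
            return c

    def checkin(pool, c):
        # On explicit close the connection goes back on the available
        # stack; repush() may discard the oldest available connections.
        with _lock:
            pool.repush(c)
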
Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/ZODB/DB.py)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/DB.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,868 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+"""Database objects
+
+$Id$"""
+
+import warnings
+
+import cPickle, cStringIO, sys
+import threading
+from time import time, ctime
+import logging
+
+from ZODB.broken import find_global
+from ZODB.utils import z64
+from ZODB.Connection import Connection
+import ZODB.serialize
+from ZODB.utils import WeakSet
+
+from zope.interface import implements
+from ZODB.interfaces import IDatabase
+
+import transaction
+
+
+logger = logging.getLogger('ZODB.DB')
+
+class _ConnectionPool(object):
+    """Manage a pool of connections.
+
+    CAUTION:  Methods should be called under the protection of a lock.
+    This class does no locking of its own.
+
+    There's no limit on the number of connections this can keep track of,
+    but a warning is logged if there are more than pool_size active
+    connections, and a critical problem if more than twice pool_size.
+
+    New connections are registered via push().  This will log a message if
+    "too many" connections are active.
+
+    When a connection is explicitly closed, tell the pool via repush().
+    That adds the connection to a stack of connections available for
+    reuse, and throws away the oldest stack entries if the stack is too large.
+    pop() pops this stack.
+
+    When a connection is obtained via pop(), the pool holds only a weak
+    reference to it thereafter.  It's not necessary to inform the pool
+    if the connection goes away.  A connection handed out by pop() counts
+    against pool_size only so long as it exists, and provided it isn't
+    repush()'ed.  A weak reference is retained so that DB methods like
+    connectionDebugInfo() can still gather statistics.
+    """
+
+    def __init__(self, pool_size):
+        # The largest # of connections we expect to see alive simultaneously.
+        self.pool_size = pool_size
+
+        # A weak set of all connections we've seen.  A connection vanishes
+        # from this set if pop() hands it out, it's not reregistered via
+        # repush(), and it becomes unreachable.
+        self.all = WeakSet()
+
+        # A stack of connections available to hand out.  This is a subset
+        # of self.all.  push() and repush() add to this, and may remove
+        # the oldest available connections if the pool is too large.
+        # pop() pops this stack.  There are never more than pool_size entries
+        # in this stack.
+        # In Python 2.4, a collections.deque would make more sense than
+        # a list (we push only "on the right", but may pop from both ends).
+        self.available = []
+
+    def set_pool_size(self, pool_size):
+        """Change our belief about the expected maximum # of live connections.
+
+        If the pool_size is smaller than the current value, this may discard
+        the oldest available connections.
+        """
+        self.pool_size = pool_size
+        self._reduce_size()
+
+    def push(self, c):
+        """Register a new available connection.
+
+        We must not know about c already. c will be pushed onto the available
+        stack even if we're over the pool size limit.
+        """
+        assert c not in self.all
+        assert c not in self.available
+        self._reduce_size(strictly_less=True)
+        self.all.add(c)
+        self.available.append(c)
+        n = len(self.all)
+        limit = self.pool_size
+        if n > limit:
+            reporter = logger.warn
+            if n > 2 * limit:
+                reporter = logger.critical
+            reporter("DB.open() has %s open connections with a pool_size "
+                     "of %s", n, limit)
+
+    def repush(self, c):
+        """Reregister an available connection formerly obtained via pop().
+
+        This pushes it on the stack of available connections, and may discard
+        older available connections.
+        """
+        assert c in self.all
+        assert c not in self.available
+        self._reduce_size(strictly_less=True)
+        self.available.append(c)
+
+    def _reduce_size(self, strictly_less=False):
+        """Throw away the oldest available connections until we're under our
+        target size (strictly_less=False, the default) or no more than that
+        (strictly_less=True).
+        """
+        target = self.pool_size
+        if strictly_less:
+            target -= 1
+        while len(self.available) > target:
+            c = self.available.pop(0)
+            self.all.remove(c)
+            # While application code may still hold a reference to `c`,
+            # there's little useful that can be done with this Connection
+            # anymore.  Its cache may be holding on to limited resources,
+            # and we replace the cache with an empty one now so that we
+            # don't have to wait for gc to reclaim it.  Note that it's not
+            # possible for DB.open() to return `c` again:  `c` can never
+            # be in an open state again.
+            # TODO:  Perhaps it would be better to break the reference
+            # cycles between `c` and `c._cache`, so that refcounting reclaims
+            # both right now.  But if user code _does_ have a strong
+            # reference to `c` now, breaking the cycle would not reclaim `c`
+            # now, and `c` would be left in a user-visible crazy state.
+            c._resetCache()
+
+    def pop(self):
+        """Pop an available connection and return it.
+
+        Return None if none are available - in this case, the caller should
+        create a new connection, register it via push(), and call pop() again.
+        The caller is responsible for serializing this sequence.
+        """
+        result = None
+        if self.available:
+            result = self.available.pop()
+            # Leave it in self.all, so we can still get at it for statistics
+            # while it's alive.
+            assert result in self.all
+        return result
+
+    def map(self, f, open_connections=True):
+        """For every live connection c, invoke f(c).
+
+        If `open_connections` is false then only call f(c) on closed
+        connections.
+
+        """
+        if open_connections:
+            self.all.map(f)
+        else:
+            map(f, self.available)
+
+class DB(object):
+    """The Object Database
+    -------------------
+
+    The DB class coordinates the activities of multiple database
+    Connection instances.  Most of the work is done by the
+    Connections created via the open method.
+
+    The DB instance manages a pool of connections.  If a connection is
+    closed, it is returned to the pool and its object cache is
+    preserved.  A subsequent call to open() will reuse the connection.
+    There is no hard limit on the pool size.  If more than `pool_size`
+    connections are opened, a warning is logged, and if more than twice
+    that many, a critical problem is logged.
+
+    The class variable 'klass' is used by open() to create database
+    connections.  It is set to Connection, but a subclass could override
+    it to provide a different connection implementation.
+
+    The database provides a few methods intended for application code
+    -- open, close, undo, and pack -- and a large collection of
+    methods for inspecting the database and its connections' caches.
+
+    :Cvariables:
+      - `klass`: Class used by L{open} to create database connections
+
+    :Groups:
+      - `User Methods`: __init__, open, close, undo, pack, classFactory
+      - `Inspection Methods`: getName, getSize, objectCount,
+        getActivityMonitor, setActivityMonitor
+      - `Connection Pool Methods`: getPoolSize, getVersionPoolSize,
+        removeVersionPool, setPoolSize, setVersionPoolSize
+      - `Transaction Methods`: invalidate
+      - `Other Methods`: lastTransaction, connectionDebugInfo
+      - `Version Methods`: modifiedInVersion, abortVersion, commitVersion,
+        versionEmpty
+      - `Cache Inspection Methods`: cacheDetail, cacheExtremeDetail,
+        cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize,
+        cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize,
+        setVersionCacheSize
+    """
+    implements(IDatabase)
+
+    klass = Connection  # Class to use for connections
+    _activity_monitor = None
+
+    def __init__(self, storage,
+                 pool_size=7,
+                 cache_size=400,
+                 cache_size_bytes=0,
+                 version_pool_size=3,
+                 version_cache_size=100,
+                 database_name='unnamed',
+                 databases=None,
+                 ):
+        """Create an object database.
+
+        :Parameters:
+          - `storage`: the storage used by the database, e.g. FileStorage
+          - `pool_size`: expected maximum number of open connections
+          - `cache_size`: target size of Connection object cache
+          - `cache_size_bytes`: target size measured in total estimated size
+               of objects in the Connection object cache.
+               "0" means unlimited.
+          - `version_pool_size`: expected maximum number of connections (per
+            version)
+          - `version_cache_size`: target size of Connection object cache for
+            version connections
+        """
+        # Allocate lock.
+        x = threading.RLock()
+        self._a = x.acquire
+        self._r = x.release
+
+        # Setup connection pools and cache info
+        # _pools maps a version string to a _ConnectionPool object.
+        self._pools = {}
+        self._pool_size = pool_size
+        self._cache_size = cache_size
+        self._version_pool_size = version_pool_size
+        self._version_cache_size = version_cache_size
+        self._cache_size_bytes = cache_size_bytes
+
+        # Setup storage
+        self._storage=storage
+        self.references = ZODB.serialize.referencesf
+        try:
+            storage.registerDB(self)
+        except TypeError:
+            storage.registerDB(self, None) # Backward compat
+
+        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
+            warnings.warn(
+                "Storage doesn't have a tpc_vote and this violates "
+                "the storage API. Violently monkeypatching in a do-nothing "
+                "tpc_vote.",
+                DeprecationWarning, 2)
+            storage.tpc_vote = lambda *args: None
+
+        try:
+            storage.load(z64, '')
+        except KeyError:
+            # Create the database's root in the storage if it doesn't exist
+            from persistent.mapping import PersistentMapping
+            root = PersistentMapping()
+            # Manually create a pickle for the root to put in the storage.
+            # The pickle must be in the special ZODB format.
+            file = cStringIO.StringIO()
+            p = cPickle.Pickler(file, 1)
+            p.dump((root.__class__, None))
+            p.dump(root.__getstate__())
+            t = transaction.Transaction()
+            t.description = 'initial database creation'
+            storage.tpc_begin(t)
+            storage.store(z64, None, file.getvalue(), '', t)
+            storage.tpc_vote(t)
+            storage.tpc_finish(t)
+
+        # Multi-database setup.
+        if databases is None:
+            databases = {}
+        self.databases = databases
+        self.database_name = database_name
+        if database_name in databases:
+            raise ValueError("database_name %r already in databases" %
+                             database_name)
+        databases[database_name] = self
+
+        self._setupUndoMethods()
+        self._setupVersionMethods()
+        self.history = storage.history
+
+    def _setupUndoMethods(self):
+        storage = self._storage
+        try:
+            self.supportsUndo = storage.supportsUndo
+        except AttributeError:
+            self.supportsUndo = lambda : False
+
+        if self.supportsUndo():
+            self.undoLog = storage.undoLog
+            if hasattr(storage, 'undoInfo'):
+                self.undoInfo = storage.undoInfo
+        else:
+            self.undoLog = self.undoInfo = lambda *a,**k: ()
+            def undo(*a, **k):
+                raise NotImplementedError
+            self.undo = undo
+
+    def _setupVersionMethods(self):
+        storage = self._storage
+        try:
+            self.supportsVersions = storage.supportsVersions
+        except AttributeError:
+            self.supportsVersions = lambda : False
+
+        if self.supportsVersions():
+            self.versionEmpty = storage.versionEmpty
+            self.versions = storage.versions
+            self.modifiedInVersion = storage.modifiedInVersion
+        else:
+            self.versionEmpty = lambda version: True
+            self.versions = lambda max=None: ()
+            self.modifiedInVersion = lambda oid: ''
+            def commitVersion(*a, **k):
+                raise NotImplementedError
+            self.commitVersion = self.abortVersion = commitVersion
+
+    # This is called by Connection.close().
+    def _returnToPool(self, connection):
+        """Return a connection to the pool.
+
+        connection._db must be self on entry.
+        """
+
+        self._a()
+        try:
+            assert connection._db is self
+            connection._opened = None
+
+            am = self._activity_monitor
+            if am is not None:
+                am.closedConnection(connection)
+
+            version = connection._version
+            try:
+                pool = self._pools[version]
+            except KeyError:
+                # No such version. We must have deleted the pool.
+                # Just let the connection go.
+
+                # We need to break circular refs to make it really go.
+                # TODO:  Figure out exactly which objects are involved in the
+                # cycle.
+                connection.__dict__.clear()
+                return
+            pool.repush(connection)
+
+        finally:
+            self._r()
+
+    def _connectionMap(self, f, open_connections=True):
+        """Call f(c) for all connections c in all pools in all versions.
+
+        If `open_connections` is false then f(c) is only called on closed
+        connections.
+
+        """
+        self._a()
+        try:
+            for pool in self._pools.values():
+                pool.map(f, open_connections=open_connections)
+        finally:
+            self._r()
+
+    def abortVersion(self, version, txn=None):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)            
+        if txn is None:
+            txn = transaction.get()
+        txn.register(AbortVersion(self, version))
+
+    def cacheDetail(self):
+        """Return information on objects in the various caches
+
+        Organized by class.
+        """
+
+        detail = {}
+        def f(con, detail=detail):
+            for oid, ob in con._cache.items():
+                module = getattr(ob.__class__, '__module__', '')
+                module = module and '%s.' % module or ''
+                c = "%s%s" % (module, ob.__class__.__name__)
+                if c in detail:
+                    detail[c] += 1
+                else:
+                    detail[c] = 1
+
+        self._connectionMap(f)
+        detail = detail.items()
+        detail.sort()
+        return detail
+
+    def cacheExtremeDetail(self):
+        detail = []
+        conn_no = [0]  # A mutable reference to a counter
+        def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no):
+            conn_no[0] += 1
+            cn = conn_no[0]
+            for oid, ob in con._cache_items():
+                id = ''
+                if hasattr(ob, '__dict__'):
+                    d = ob.__dict__
+                    if d.has_key('id'):
+                        id = d['id']
+                    elif d.has_key('__name__'):
+                        id = d['__name__']
+
+                module = getattr(ob.__class__, '__module__', '')
+                module = module and ('%s.' % module) or ''
+
+                # What refcount ('rc') should we return?  The intent is
+                # that we return the true Python refcount, but as if the
+                # cache didn't exist.  This routine adds 3 to the true
+                # refcount:  1 for binding to name 'ob', another because
+                # ob lives in the con._cache_items() list we're iterating
+                # over, and calling sys.getrefcount(ob) boosts ob's
+                # count by 1 too.  So the true refcount is 3 less than
+                # sys.getrefcount(ob) returns.  But, in addition to that,
+                # the cache holds an extra reference on non-ghost objects,
+                # and we also want to pretend that doesn't exist.
+                detail.append({
+                    'conn_no': cn,
+                    'oid': oid,
+                    'id': id,
+                    'klass': "%s%s" % (module, ob.__class__.__name__),
+                    'rc': rc(ob) - 3 - (ob._p_changed is not None),
+                    'state': ob._p_changed,
+                    #'references': con.references(oid),
+                    })
+
+        self._connectionMap(f)
+        return detail
+
+    def cacheFullSweep(self):
+        self._connectionMap(lambda c: c._cache.full_sweep())
+
+    def cacheLastGCTime(self):
+        m = [0]
+        def f(con, m=m):
+            t = con._cache.cache_last_gc_time
+            if t > m[0]:
+                m[0] = t
+
+        self._connectionMap(f)
+        return m[0]
+
+    def cacheMinimize(self):
+        self._connectionMap(lambda c: c._cache.minimize())
+
+    def cacheSize(self):
+        m = [0]
+        def f(con, m=m):
+            m[0] += con._cache.cache_non_ghost_count
+
+        self._connectionMap(f)
+        return m[0]
+
+    def cacheDetailSize(self):
+        m = []
+        def f(con, m=m):
+            m.append({'connection': repr(con),
+                      'ngsize': con._cache.cache_non_ghost_count,
+                      'size': len(con._cache)})
+        self._connectionMap(f)
+        m.sort()
+        return m
+
+    def close(self):
+        """Close the database and its underlying storage.
+
+        It is important to close the database, because the storage may
+        flush in-memory data structures to disk when it is closed.
+        Leaving the storage open when the process exits can cause the
+        next open to be slow.
+
+        What effect does closing the database have on existing
+        connections?  Technically, they remain open, but their storage
+        is closed, so they stop behaving usefully.  Perhaps close()
+        should also close all the Connections.
+        """
+        self._storage.close()
+
+    def commitVersion(self, source, destination='', txn=None):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)            
+        if txn is None:
+            txn = transaction.get()
+        txn.register(CommitVersion(self, source, destination))
+
+    def getCacheSize(self):
+        return self._cache_size
+
+    def getCacheSizeBytes(self):
+        return self._cache_size_bytes
+
+    def lastTransaction(self):
+        return self._storage.lastTransaction()
+
+    def getName(self):
+        return self._storage.getName()
+
+    def getPoolSize(self):
+        return self._pool_size
+
+    def getSize(self):
+        return self._storage.getSize()
+
+    def getVersionCacheSize(self):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)            
+        return self._version_cache_size
+
+    def getVersionPoolSize(self):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)            
+        return self._version_pool_size
+
+    def invalidate(self, tid, oids, connection=None, version=''):
+        """Invalidate references to a given oid.
+
+        This is used to indicate that one of the connections has committed a
+        change to the object.  The connection committing the change should be
+        passed in to prevent useless (but harmless) messages to the
+        connection.
+        """
+        if connection is not None:
+            version = connection._version
+
+        # Notify connections.
+        def inval(c):
+            if (c is not connection and
+                  (not version or c._version == version)):
+                c.invalidate(tid, oids)
+        self._connectionMap(inval)
+
+    def invalidateCache(self):
+        """Invalidate each of the connection caches
+        """
+        self._connectionMap(lambda c: c.invalidateCache())
+
+    def objectCount(self):
+        return len(self._storage)
+
+    def open(self, version='', transaction_manager=None):
+        """Return a database Connection for use by application code.
+
+        The optional `version` argument can be used to specify that a
+        version connection is desired.
+
+        Note that the connection pool is managed as a stack, to
+        increase the likelihood that the connection's cache will
+        include useful objects.
+
+        :Parameters:
+          - `version`: the "version" that all changes will be made
+             in, defaults to no version.
+          - `transaction_manager`: transaction manager to use.  None means
+             use the default transaction manager.
+        """
+
+        if version:
+            if not self.supportsVersions():
+                raise ValueError(
+                    "Versions are not supported by this database.")
+            warnings.warn(
+                "Versions are deprecated and will become unsupported "
+                "in ZODB 3.9",
+                DeprecationWarning, 2)            
+
+        self._a()
+        try:
+            # pool <- the _ConnectionPool for this version
+            pool = self._pools.get(version)
+            if pool is None:
+                if version:
+                    size = self._version_pool_size
+                else:
+                    size = self._pool_size
+                self._pools[version] = pool = _ConnectionPool(size)
+            assert pool is not None
+
+            # result <- a connection
+            result = pool.pop()
+            if result is None:
+                if version:
+                    size = self._version_cache_size
+                else:
+                    size = self._cache_size
+                c = self.klass(self, version, size, self._cache_size_bytes)
+                pool.push(c)
+                result = pool.pop()
+            assert result is not None
+
+            # Tell the connection it belongs to self.
+            result.open(transaction_manager)
+
+            # A good time to do some cache cleanup.
+            self._connectionMap(lambda c: c.cacheGC(), open_connections=False)
+
+            return result
+
+        finally:
+            self._r()
+
+    def removeVersionPool(self, version):
+        try:
+            del self._pools[version]
+        except KeyError:
+            pass
+
+    def connectionDebugInfo(self):
+        result = []
+        t = time()
+
+        def get_info(c):
+            # `result`, `t` and `version` are lexically inherited.
+            o = c._opened
+            d = c.getDebugInfo()
+            if d:
+                if len(d) == 1:
+                    d = d[0]
+            else:
+                d = ''
+            d = "%s (%s)" % (d, len(c._cache))
+
+            result.append({
+                'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
+                'info': d,
+                'version': version,
+                })
+
+        for version, pool in self._pools.items():
+            pool.map(get_info)
+        return result
+
+    def getActivityMonitor(self):
+        return self._activity_monitor
+
+    def pack(self, t=None, days=0):
+        """Pack the storage, deleting unused object revisions.
+
+        A pack is always performed relative to a particular time, by
+        default the current time.  All object revisions that are not
+        reachable as of the pack time are deleted from the storage.
+
+        The cost of this operation varies by storage, but it is
+        usually an expensive operation.
+
+        There are two optional arguments that can be used to set the
+        pack time: t, pack time in seconds since the epoch, and days,
+        the number of days to subtract from t or from the current
+        time if t is not specified.
+        """
+        if t is None:
+            t = time()
+        t -= days * 86400
+        try:
+            self._storage.pack(t, self.references)
+        except:
+            logger.error("packing", exc_info=True)
+            raise
+
+    def setActivityMonitor(self, am):
+        self._activity_monitor = am
+
+    def classFactory(self, connection, modulename, globalname):
+        # Zope will rebind this method to arbitrary user code at runtime.
+        return find_global(modulename, globalname)
+
+    def setCacheSize(self, size):
+        self._a()
+        try:
+            self._cache_size = size
+            pool = self._pools.get('')
+            if pool is not None:
+                def setsize(c):
+                    c._cache.cache_size = size
+                pool.map(setsize)
+        finally:
+            self._r()
+
+    def setVersionCacheSize(self, size):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)
+        self._a()
+        try:
+            self._version_cache_size = size
+            def setsize(c):
+                c._cache.cache_size = size
+            for version, pool in self._pools.items():
+                if version:
+                    pool.map(setsize)
+        finally:
+            self._r()
+
+    def setCacheSizeBytes(self, size):
+        self._a()
+        try:
+            self._cache_size_bytes = size
+            pool = self._pools.get('')
+            if pool is not None:
+                def setsize(c):
+                    c._cache.cache_size_bytes = size
+                pool.map(setsize)
+        finally:
+            self._r()
+
+    def setPoolSize(self, size):
+        self._pool_size = size
+        self._reset_pool_sizes(size, for_versions=False)
+
+    def setVersionPoolSize(self, size):
+        warnings.warn(
+            "Versions are deprecated and will become unsupported "
+            "in ZODB 3.9",
+            DeprecationWarning, 2)            
+        self._version_pool_size = size
+        self._reset_pool_sizes(size, for_versions=True)
+
+    def _reset_pool_sizes(self, size, for_versions=False):
+        self._a()
+        try:
+            for version, pool in self._pools.items():
+                if (version != '') == for_versions:
+                    pool.set_pool_size(size)
+        finally:
+            self._r()
+
+    def undo(self, id, txn=None):
+        """Undo a transaction identified by id.
+
+        A transaction can be undone if all of the objects involved in
+        the transaction were not modified subsequently, if any
+        modifications can be resolved by conflict resolution, or if
+        subsequent changes resulted in the same object state.
+
+        The value of id should be generated by calling undoLog()
+        or undoInfo().  The value of id is not the same as a
+        transaction id used by other methods; it is unique to undo().
+
+        :Parameters:
+          - `id`: a storage-specific transaction identifier
+          - `txn`: transaction context to use for undo().
+            By default, uses the current transaction.
+        """
+        if txn is None:
+            txn = transaction.get()
+        txn.register(TransactionalUndo(self, id))
+
+
+resource_counter_lock = threading.Lock()
+resource_counter = 0
+
+class ResourceManager(object):
+    """Transaction participation for a version or undo resource."""
+
+    # XXX This implementation is broken.  Subclasses invalidate oids
+    # in their commit calls. Invalidations should not be sent until
+    # tpc_finish is called.  In fact, invalidations should be sent to
+    # the db *while* tpc_finish is being called on the storage.
+
+    def __init__(self, db):
+        self._db = db
+        # Delegate the actual 2PC methods to the storage
+        self.tpc_vote = self._db._storage.tpc_vote
+        self.tpc_finish = self._db._storage.tpc_finish
+        self.tpc_abort = self._db._storage.tpc_abort
+
+        # Get a number from a simple thread-safe counter, then
+        # increment it, for the purpose of sorting ResourceManagers by
+        # creation order.  This ensures that multiple ResourceManagers
+        # within a transaction commit in a predictable sequence.
+        resource_counter_lock.acquire()
+        try:
+            global resource_counter
+            self._count = resource_counter
+            resource_counter += 1
+        finally:
+            resource_counter_lock.release()
+
+    def sortKey(self):
+        return "%s:%016x" % (self._db._storage.sortKey(), self._count)
+
+    def tpc_begin(self, txn, sub=False):
+        if sub:
+            raise ValueError("doesn't support sub-transactions")
+        self._db._storage.tpc_begin(txn)
+
+    # The object registers itself with the txn manager, so the ob
+    # argument to the methods below is self.
+
+    def abort(self, obj, txn):
+        raise NotImplementedError
+
+    def commit(self, obj, txn):
+        raise NotImplementedError
+
+class CommitVersion(ResourceManager):
+
+    def __init__(self, db, version, dest=''):
+        super(CommitVersion, self).__init__(db)
+        self._version = version
+        self._dest = dest
+
+    def commit(self, ob, t):
+        # XXX see XXX in ResourceManager
+        dest = self._dest
+        tid, oids = self._db._storage.commitVersion(self._version,
+                                                    self._dest,
+                                                    t)
+        oids = dict.fromkeys(oids, 1)
+        self._db.invalidate(tid, oids, version=self._dest)
+        if self._dest:
+            # the code above just invalidated the dest version.
+            # now we need to invalidate the source!
+            self._db.invalidate(tid, oids, version=self._version)
+
+class AbortVersion(ResourceManager):
+
+    def __init__(self, db, version):
+        super(AbortVersion, self).__init__(db)
+        self._version = version
+
+    def commit(self, ob, t):
+        # XXX see XXX in ResourceManager
+        tid, oids = self._db._storage.abortVersion(self._version, t)
+        self._db.invalidate(tid,
+                            dict.fromkeys(oids, 1),
+                            version=self._version)
+
+class TransactionalUndo(ResourceManager):
+
+    def __init__(self, db, tid):
+        super(TransactionalUndo, self).__init__(db)
+        self._tid = tid
+
+    def commit(self, ob, t):
+        # XXX see XXX in ResourceManager
+        tid, oids = self._db._storage.undo(self._tid, t)
+        self._db.invalidate(tid, dict.fromkeys(oids, 1))
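
For reference, a minimal sketch of driving the undo machinery above
(assumes `db` is a DB instance backed by an undoable storage such as
FileStorage; undoInfo() entries carry the storage-specific id that
undo() expects):

    import transaction

    undo_id = db.undoInfo()[0]['id']   # most recent undoable transaction
    db.undo(undo_id)                   # registers a TransactionalUndo resource
    transaction.commit()               # the undo is applied at commit time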

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/ZODB/component.xml	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,235 +0,0 @@
-<component prefix="ZODB.config">
-
-  <!-- TODO needs descriptions for everything -->
-
-  <abstracttype name="ZODB.storage"/>
-  <abstracttype name="ZODB.database"/>
-
-  <sectiontype name="filestorage" datatype=".FileStorage"
-               implements="ZODB.storage">
-    <key name="path" required="yes">
-      <description>
-        Path name to the main storage file.  The names for
-        supplemental files, including index and lock files, will be
-        computed from this.
-      </description>
-    </key>
-    <key name="create" datatype="boolean" default="false">
-      <description>
-        Flag that indicates whether the storage should be truncated if
-        it already exists.
-      </description>
-    </key>
-    <key name="read-only" datatype="boolean" default="false">
-      <description>
-        If true, only reads may be executed against the storage.  Note
-        that the "pack" operation is not considered a write operation
-        and is still allowed on a read-only filestorage.
-      </description>
-    </key>
-    <key name="quota" datatype="byte-size">
-      <description>
-        Maximum allowed size of the storage file.  Operations which
-        would cause the size of the storage to exceed the quota will
-        result in a ZODB.FileStorage.FileStorageQuotaError being
-        raised.
-      </description>
-    </key>
-  </sectiontype>
-
-  <sectiontype name="mappingstorage" datatype=".MappingStorage"
-               implements="ZODB.storage">
-    <key name="name" default="Mapping Storage"/>
-  </sectiontype>
-
-  <!-- The BDB storages probably need to be revised somewhat still.
-       The extension relationship seems a little odd.
-    -->
-  <sectiontype name="fullstorage" datatype=".BDBFullStorage"
-               implements="ZODB.storage">
-    <key name="envdir" required="yes" />
-    <key name="interval" datatype="time-interval" default="2m" />
-    <key name="kbyte" datatype="integer" default="0" />
-    <key name="min" datatype="integer" default="0" />
-    <key name="logdir" />
-    <key name="cachesize" datatype="byte-size" default="128MB" />
-    <key name="frequency" datatype="time-interval" default="0" />
-    <key name="packtime" datatype="time-interval" default="4h" />
-    <key name="gcpack" datatype="integer" default="0" />
-    <key name="read-only" datatype="boolean" default="off"/>
-  </sectiontype>
-
-  <sectiontype name="minimalstorage" datatype=".BDBMinimalStorage"
-               implements="ZODB.storage" extends="fullstorage"/>
-
-  <sectiontype name="zeoclient" datatype=".ZEOClient"
-               implements="ZODB.storage">
-    <multikey name="server" datatype="socket-connection-address" required="yes"/>
-    <key name="blob-dir" required="no">
-      <description>
-        Path name to the blob cache directory.
-      </description>
-    </key>
-    <key name="shared-blob-dir" required="no" default="no"
-        datatype="boolean">
-      <description>
-          Tells whether the cache is a shared writable directory,
-          in which case the ZEO protocol transfers only the filename,
-          not the file, when committing.
-      </description>
-    </key>
-
-    <key name="storage" default="1">
-      <description>
-        The name of the storage that the client wants to use.  If the
-        ZEO server serves more than one storage, the client selects
-        the storage it wants to use by name.  The default name is '1',
-        which is also the default name for the ZEO server.
-      </description>
-    </key>
-    <key name="cache-size" datatype="byte-size" default="20MB">
-      <description>
-        The maximum size of the client cache, in bytes, KB or MB.
-      </description>
-    </key>
-    <key name="name" default="">
-      <description>
-        The storage name.  If unspecified, the address of the server
-        will be used as the name.
-      </description>
-    </key>
-    <key name="client">
-      <description>
-        Enables persistent cache files.  The string passed here is
-        used to construct the cache filenames.  If it is not
-        specified, the client creates a temporary cache that will
-        only be used by the current object.
-      </description>
-    </key>
-    <key name="var">
-      <description>
-        The directory where persistent cache files are stored.  By
-        default cache files, if they are persistent, are stored in
-        the current directory.
-      </description>
-    </key>
-    <key name="min-disconnect-poll" datatype="integer" default="5">
-      <description>
-        The minimum delay, in seconds, between attempts to connect
-        to the server.  Defaults to 5 seconds.
-      </description>
-    </key>
-    <key name="max-disconnect-poll" datatype="integer" default="300">
-      <description>
-        The maximum delay, in seconds, between attempts to connect
-        to the server.  Defaults to 300 seconds.
-      </description>
-    </key>
-    <key name="wait" datatype="boolean" default="on">
-      <description>
-        A boolean indicating whether the constructor should wait
-        for the client to connect to the server and verify the cache
-        before returning.  The default is true.
-      </description>
-    </key>
-    <key name="read-only" datatype="boolean" default="off">
-      <description>
-        A flag indicating whether this should be a read-only storage,
-        defaulting to false (i.e. writing is allowed by default).
-      </description>
-    </key>
-    <key name="read-only-fallback" datatype="boolean" default="off">
-      <description>
-        A flag indicating whether a read-only remote storage should be
-        acceptable as a fallback when no writable storages are
-        available.  Defaults to false.  At most one of read_only and
-        read_only_fallback should be true.
-      </description>
-    </key>
-    <key name="username" required="no">
-      <description>
-        The authentication username of the server.
-      </description>
-    </key>
-    <key name="password" required="no">
-      <description>
-        The authentication password of the server.
-      </description>
-    </key>
-    <key name="realm" required="no">
-      <description>
-        The authentication realm of the server.  Some authentication
-        schemes use a realm to identify the logical set of usernames
-        that are accepted by this server.
-      </description>
-    </key>
-  </sectiontype>
-
-  <sectiontype name="demostorage" datatype=".DemoStorage"
-               implements="ZODB.storage">
-    <key name="name" default="Demo Storage"/>
-    <section type="ZODB.storage" name="*" attribute="base"/>
-    <key name="quota" datatype="integer"/>
-  </sectiontype>
-
-
-  <sectiontype name="zodb" datatype=".ZODBDatabase"
-               implements="ZODB.database">
-    <section type="ZODB.storage" name="*" attribute="storage"/>
-    <key name="cache-size" datatype="integer" default="5000">
-      <description>
-        Target size, in number of objects, of each connection's
-        object cache.
-      </description>
-    </key>
-    <key name="pool-size" datatype="integer" default="7">
-      <description>
-        The expected maximum number of simultaneously open connections.
-        There is no hard limit (as many connections as are requested
-        will be opened, until system resources are exhausted).  Exceeding
-        pool-size connections causes a warning message to be logged,
-        and exceeding twice pool-size connections causes a critical
-        message to be logged.
-      </description>
-    </key>
-    <key name="version-pool-size" datatype="integer" default="3">
-      <description>
-        The expected maximum number of connections simultaneously open
-        per version.
-      </description>
-    </key>
-    <key name="version-cache-size" datatype="integer" default="100">
-      <description>
-        Target size, in number of objects, of each version connection's
-        object cache.
-      </description>
-    </key>
-    <key name="database-name" default="unnamed">
-      <description>
-        When multidatabases are in use, this is the name given to this
-        database in the collection.  The name must be unique across all
-        databases in the collection.  The collection must also be given
-        a mapping from its databases' names to their databases, but that
-        cannot be specified in a ZODB config file.  Applications using
-        multidatabases typically supply a way to configure the mapping in
-        their own config files, using the "databases" parameter of a DB
-        constructor.
-      </description>
-    </key>
-  </sectiontype>
-
-  <sectiontype name="blobstorage" datatype=".BlobStorage"
-    implements="ZODB.storage">
-    <key name="blob-dir" required="yes">
-      <description>
-        Path name to the blob storage directory.
-      </description>
-    </key>
-    <section type="ZODB.storage" name="*" attribute="base"/>
-  </sectiontype>
-
-
-    
-
-
-</component>

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/ZODB/component.xml)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/component.xml	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,242 @@
+<component prefix="ZODB.config">
+
+  <!-- TODO needs descriptions for everything -->
+
+  <abstracttype name="ZODB.storage"/>
+  <abstracttype name="ZODB.database"/>
+
+  <sectiontype name="filestorage" datatype=".FileStorage"
+               implements="ZODB.storage">
+    <key name="path" required="yes">
+      <description>
+        Path name to the main storage file.  The names for
+        supplemental files, including index and lock files, will be
+        computed from this.
+      </description>
+    </key>
+    <key name="create" datatype="boolean" default="false">
+      <description>
+        Flag that indicates whether the storage should be truncated if
+        it already exists.
+      </description>
+    </key>
+    <key name="read-only" datatype="boolean" default="false">
+      <description>
+        If true, only reads may be executed against the storage.  Note
+        that the "pack" operation is not considered a write operation
+        and is still allowed on a read-only filestorage.
+      </description>
+    </key>
+    <key name="quota" datatype="byte-size">
+      <description>
+        Maximum allowed size of the storage file.  Operations which
+        would cause the size of the storage to exceed the quota will
+        result in a ZODB.FileStorage.FileStorageQuotaError being
+        raised.
+      </description>
+    </key>
+  </sectiontype>
+
+  <sectiontype name="mappingstorage" datatype=".MappingStorage"
+               implements="ZODB.storage">
+    <key name="name" default="Mapping Storage"/>
+  </sectiontype>
+
+  <!-- The BDB storages probably need to be revised somewhat still.
+       The extension relationship seems a little odd.
+    -->
+  <sectiontype name="fullstorage" datatype=".BDBFullStorage"
+               implements="ZODB.storage">
+    <key name="envdir" required="yes" />
+    <key name="interval" datatype="time-interval" default="2m" />
+    <key name="kbyte" datatype="integer" default="0" />
+    <key name="min" datatype="integer" default="0" />
+    <key name="logdir" />
+    <key name="cachesize" datatype="byte-size" default="128MB" />
+    <key name="frequency" datatype="time-interval" default="0" />
+    <key name="packtime" datatype="time-interval" default="4h" />
+    <key name="gcpack" datatype="integer" default="0" />
+    <key name="read-only" datatype="boolean" default="off"/>
+  </sectiontype>
+
+  <sectiontype name="minimalstorage" datatype=".BDBMinimalStorage"
+               implements="ZODB.storage" extends="fullstorage"/>
+
+  <sectiontype name="zeoclient" datatype=".ZEOClient"
+               implements="ZODB.storage">
+    <multikey name="server" datatype="socket-connection-address" required="yes"/>
+    <key name="blob-dir" required="no">
+      <description>
+        Path name to the blob cache directory.
+      </description>
+    </key>
+    <key name="shared-blob-dir" required="no" default="no"
+        datatype="boolean">
+      <description>
+          Tells whether the cache is a shared writable directory,
+          in which case the ZEO protocol transfers only the filename,
+          not the file, when committing.
+      </description>
+    </key>
+
+    <key name="storage" default="1">
+      <description>
+        The name of the storage that the client wants to use.  If the
+        ZEO server serves more than one storage, the client selects
+        the storage it wants to use by name.  The default name is '1',
+        which is also the default name for the ZEO server.
+      </description>
+    </key>
+    <key name="cache-size" datatype="byte-size" default="20MB">
+      <description>
+        The maximum size of the client cache, in bytes, KB or MB.
+      </description>
+    </key>
+    <key name="name" default="">
+      <description>
+        The storage name.  If unspecified, the address of the server
+        will be used as the name.
+      </description>
+    </key>
+    <key name="client">
+      <description>
+        Enables persistent cache files.  The string passed here is
+        used to construct the cache filenames.  If it is not
+        specified, the client creates a temporary cache that will
+        only be used by the current object.
+      </description>
+    </key>
+    <key name="var">
+      <description>
+        The directory where persistent cache files are stored.  By
+        default cache files, if they are persistent, are stored in
+        the current directory.
+      </description>
+    </key>
+    <key name="min-disconnect-poll" datatype="integer" default="5">
+      <description>
+        The minimum delay, in seconds, between attempts to connect
+        to the server.  Defaults to 5 seconds.
+      </description>
+    </key>
+    <key name="max-disconnect-poll" datatype="integer" default="300">
+      <description>
+        The maximum delay, in seconds, between attempts to connect
+        to the server.  Defaults to 300 seconds.
+      </description>
+    </key>
+    <key name="wait" datatype="boolean" default="on">
+      <description>
+        A boolean indicating whether the constructor should wait
+        for the client to connect to the server and verify the cache
+        before returning.  The default is true.
+      </description>
+    </key>
+    <key name="read-only" datatype="boolean" default="off">
+      <description>
+        A flag indicating whether this should be a read-only storage,
+        defaulting to false (i.e. writing is allowed by default).
+      </description>
+    </key>
+    <key name="read-only-fallback" datatype="boolean" default="off">
+      <description>
+        A flag indicating whether a read-only remote storage should be
+        acceptable as a fallback when no writable storages are
+        available.  Defaults to false.  At most one of read_only and
+        read_only_fallback should be true.
+      </description>
+    </key>
+    <key name="username" required="no">
+      <description>
+        The authentication username of the server.
+      </description>
+    </key>
+    <key name="password" required="no">
+      <description>
+        The authentication password of the server.
+      </description>
+    </key>
+    <key name="realm" required="no">
+      <description>
+        The authentication realm of the server.  Some authentication
+        schemes use a realm to identify the logical set of usernames
+        that are accepted by this server.
+      </description>
+    </key>
+  </sectiontype>
+
+  <sectiontype name="demostorage" datatype=".DemoStorage"
+               implements="ZODB.storage">
+    <key name="name" default="Demo Storage"/>
+    <section type="ZODB.storage" name="*" attribute="base"/>
+    <key name="quota" datatype="integer"/>
+  </sectiontype>
+
+
+  <sectiontype name="zodb" datatype=".ZODBDatabase"
+               implements="ZODB.database">
+    <section type="ZODB.storage" name="*" attribute="storage"/>
+    <key name="cache-size" datatype="integer" default="5000">
+      <description>
+        Target size, in number of objects, of each connection's
+        object cache.
+      </description>
+    </key>
+    <key name="cache-size-bytes" datatype="byte-size" default="0">
+      <description>
+        Target size, in total estimated size for objects, of each connection's
+        object cache.
+        "0" means no limit.
+      </description>
+    </key>
+    <key name="pool-size" datatype="integer" default="7">
+      <description>
+        The expected maximum number of simultaneously open connections.
+        There is no hard limit (as many connections as are requested
+        will be opened, until system resources are exhausted).  Exceeding
+        pool-size connections causes a warning message to be logged,
+        and exceeding twice pool-size connections causes a critical
+        message to be logged.
+      </description>
+    </key>
+    <key name="version-pool-size" datatype="integer" default="3">
+      <description>
+        The expected maximum number of connections simultaneously open
+        per version.
+      </description>
+    </key>
+    <key name="version-cache-size" datatype="integer" default="100">
+      <description>
+        Target size, in number of objects, of each version connection's
+        object cache.
+      </description>
+    </key>
+    <key name="database-name" default="unnamed">
+      <description>
+        When multidatabases are in use, this is the name given to this
+        database in the collection.  The name must be unique across all
+        databases in the collection.  The collection must also be given
+        a mapping from its databases' names to their databases, but that
+        cannot be specified in a ZODB config file.  Applications using
+        multidatabases typically supply a way to configure the mapping in
+        their own config files, using the "databases" parameter of a DB
+        constructor.
+      </description>
+    </key>
+
+  <sectiontype name="blobstorage" datatype=".BlobStorage"
+    implements="ZODB.storage">
+    <key name="blob-dir" required="yes">
+      <description>
+        Path name to the blob storage directory.
+      </description>
+    </key>
+    <section type="ZODB.storage" name="*" attribute="base"/>
+  </sectiontype>
+
+
+    
+
+
+</component>
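
For reference, a minimal sketch of a <zodb> section exercising the
schema above, including the newly added cache-size-bytes key (the
filestorage path and the sizes are illustrative):

    <zodb>
      <filestorage>
        path /var/zodb/Data.fs
      </filestorage>
      cache-size 5000
      cache-size-bytes 100MB
    </zodb>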

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/ZODB/config.py	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,192 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Open database and storage from a configuration.
-
-$Id$"""
-
-import os
-from cStringIO import StringIO
-
-import ZConfig
-
-import ZODB
-
-db_schema_path = os.path.join(ZODB.__path__[0], "config.xml")
-_db_schema = None
-
-s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
-_s_schema = None
-
-def getDbSchema():
-    global _db_schema
-    if _db_schema is None:
-        _db_schema = ZConfig.loadSchema(db_schema_path)
-    return _db_schema
-
-def getStorageSchema():
-    global _s_schema
-    if _s_schema is None:
-        _s_schema = ZConfig.loadSchema(s_schema_path)
-    return _s_schema
-
-def databaseFromString(s):
-    return databaseFromFile(StringIO(s))
-
-def databaseFromFile(f):
-    config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
-    return databaseFromConfig(config.database)
-
-def databaseFromURL(url):
-    config, handler = ZConfig.loadConfig(getDbSchema(), url)
-    return databaseFromConfig(config.database)
-
-def databaseFromConfig(section):
-    return section.open()
-
-def storageFromString(s):
-    return storageFromFile(StringIO(s))
-
-def storageFromFile(f):
-    config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
-    return storageFromConfig(config.storage)
-
-def storageFromURL(url):
-    config, handler = ZConfig.loadConfig(getStorageSchema(), url)
-    return storageFromConfig(config.storage)
-
-def storageFromConfig(section):
-    return section.open()
-
-
-class BaseConfig:
-    """Object representing a configured storage or database.
-
-    Methods:
-
-    open() -- open and return the configured object
-
-    Attributes:
-
-    name   -- name of the storage
-
-    """
-
-    def __init__(self, config):
-        self.config = config
-        self.name = config.getSectionName()
-
-    def open(self, database_name='unnamed', databases=None):
-        """Open and return the storage object."""
-        raise NotImplementedError
-
-class ZODBDatabase(BaseConfig):
-
-    def open(self, databases=None):
-        section = self.config
-        storage = section.storage.open()
-        try:
-            return ZODB.DB(storage,
-                           pool_size=section.pool_size,
-                           cache_size=section.cache_size,
-                           version_pool_size=section.version_pool_size,
-                           version_cache_size=section.version_cache_size,
-                           database_name=section.database_name,
-                           databases=databases)
-        except:
-            storage.close()
-            raise
-
-class MappingStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.MappingStorage import MappingStorage
-        return MappingStorage(self.config.name)
-
-class DemoStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.DemoStorage import DemoStorage
-        if self.config.base:
-            base = self.config.base.open()
-        else:
-            base = None
-        return DemoStorage(self.config.name,
-                           base=base,
-                           quota=self.config.quota)
-
-class FileStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.FileStorage import FileStorage
-        return FileStorage(self.config.path,
-                           create=self.config.create,
-                           read_only=self.config.read_only,
-                           quota=self.config.quota)
-
-class BlobStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.blob import BlobStorage
-        base = self.config.base.open()
-        return BlobStorage(self.config.blob_dir, base)
-
-
-class ZEOClient(BaseConfig):
-
-    def open(self):
-        from ZEO.ClientStorage import ClientStorage
-        # config.server is a multikey of socket-connection-address values
-        # where the value is a socket family, address tuple.
-        L = [server.address for server in self.config.server]
-        return ClientStorage(
-            L,
-            blob_dir=self.config.blob_dir,
-            shared_blob_dir=self.config.shared_blob_dir,
-            storage=self.config.storage,
-            cache_size=self.config.cache_size,
-            name=self.config.name,
-            client=self.config.client,
-            var=self.config.var,
-            min_disconnect_poll=self.config.min_disconnect_poll,
-            max_disconnect_poll=self.config.max_disconnect_poll,
-            wait=self.config.wait,
-            read_only=self.config.read_only,
-            read_only_fallback=self.config.read_only_fallback,
-            username=self.config.username,
-            password=self.config.password,
-            realm=self.config.realm)
-
-class BDBStorage(BaseConfig):
-
-    def open(self):
-        from BDBStorage.BerkeleyBase import BerkeleyConfig
-        storageclass = self.get_storageclass()
-        bconf = BerkeleyConfig()
-        for name in dir(BerkeleyConfig):
-            if name.startswith('_'):
-                continue
-            setattr(bconf, name, getattr(self.config, name))
-        return storageclass(self.config.envdir, config=bconf)
-
-class BDBMinimalStorage(BDBStorage):
-
-    def get_storageclass(self):
-        import BDBStorage.BDBMinimalStorage
-        return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
-
-class BDBFullStorage(BDBStorage):
-
-    def get_storageclass(self):
-        import BDBStorage.BDBFullStorage
-        return BDBStorage.BDBFullStorage.BDBFullStorage

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/ZODB/config.py)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/config.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,193 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+"""Open database and storage from a configuration.
+
+$Id$"""
+
+import os
+from cStringIO import StringIO
+
+import ZConfig
+
+import ZODB
+
+db_schema_path = os.path.join(ZODB.__path__[0], "config.xml")
+_db_schema = None
+
+s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
+_s_schema = None
+
+def getDbSchema():
+    global _db_schema
+    if _db_schema is None:
+        _db_schema = ZConfig.loadSchema(db_schema_path)
+    return _db_schema
+
+def getStorageSchema():
+    global _s_schema
+    if _s_schema is None:
+        _s_schema = ZConfig.loadSchema(s_schema_path)
+    return _s_schema
+
+def databaseFromString(s):
+    return databaseFromFile(StringIO(s))
+
+def databaseFromFile(f):
+    config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
+    return databaseFromConfig(config.database)
+
+def databaseFromURL(url):
+    config, handler = ZConfig.loadConfig(getDbSchema(), url)
+    return databaseFromConfig(config.database)
+
+def databaseFromConfig(section):
+    return section.open()
+
+def storageFromString(s):
+    return storageFromFile(StringIO(s))
+
+def storageFromFile(f):
+    config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
+    return storageFromConfig(config.storage)
+
+def storageFromURL(url):
+    config, handler = ZConfig.loadConfig(getStorageSchema(), url)
+    return storageFromConfig(config.storage)
+
+def storageFromConfig(section):
+    return section.open()
+
+
+class BaseConfig:
+    """Object representing a configured storage or database.
+
+    Methods:
+
+    open() -- open and return the configured object
+
+    Attributes:
+
+    name   -- name of the storage
+
+    """
+
+    def __init__(self, config):
+        self.config = config
+        self.name = config.getSectionName()
+
+    def open(self, database_name='unnamed', databases=None):
+        """Open and return the storage object."""
+        raise NotImplementedError
+
+class ZODBDatabase(BaseConfig):
+
+    def open(self, databases=None):
+        section = self.config
+        storage = section.storage.open()
+        try:
+            return ZODB.DB(storage,
+                           pool_size=section.pool_size,
+                           cache_size=section.cache_size,
+                           cache_size_bytes=section.cache_size_bytes,
+                           version_pool_size=section.version_pool_size,
+                           version_cache_size=section.version_cache_size,
+                           database_name=section.database_name,
+                           databases=databases)
+        except:
+            storage.close()
+            raise
+
+class MappingStorage(BaseConfig):
+
+    def open(self):
+        from ZODB.MappingStorage import MappingStorage
+        return MappingStorage(self.config.name)
+
+class DemoStorage(BaseConfig):
+
+    def open(self):
+        from ZODB.DemoStorage import DemoStorage
+        if self.config.base:
+            base = self.config.base.open()
+        else:
+            base = None
+        return DemoStorage(self.config.name,
+                           base=base,
+                           quota=self.config.quota)
+
+class FileStorage(BaseConfig):
+
+    def open(self):
+        from ZODB.FileStorage import FileStorage
+        return FileStorage(self.config.path,
+                           create=self.config.create,
+                           read_only=self.config.read_only,
+                           quota=self.config.quota)
+
+class BlobStorage(BaseConfig):
+
+    def open(self):
+        from ZODB.blob import BlobStorage
+        base = self.config.base.open()
+        return BlobStorage(self.config.blob_dir, base)
+
+
+class ZEOClient(BaseConfig):
+
+    def open(self):
+        from ZEO.ClientStorage import ClientStorage
+        # config.server is a multikey of socket-connection-address values
+        # where the value is a socket family, address tuple.
+        L = [server.address for server in self.config.server]
+        return ClientStorage(
+            L,
+            blob_dir=self.config.blob_dir,
+            shared_blob_dir=self.config.shared_blob_dir,
+            storage=self.config.storage,
+            cache_size=self.config.cache_size,
+            name=self.config.name,
+            client=self.config.client,
+            var=self.config.var,
+            min_disconnect_poll=self.config.min_disconnect_poll,
+            max_disconnect_poll=self.config.max_disconnect_poll,
+            wait=self.config.wait,
+            read_only=self.config.read_only,
+            read_only_fallback=self.config.read_only_fallback,
+            username=self.config.username,
+            password=self.config.password,
+            realm=self.config.realm)
+
+class BDBStorage(BaseConfig):
+
+    def open(self):
+        from BDBStorage.BerkeleyBase import BerkeleyConfig
+        storageclass = self.get_storageclass()
+        bconf = BerkeleyConfig()
+        for name in dir(BerkeleyConfig):
+            if name.startswith('_'):
+                continue
+            setattr(bconf, name, getattr(self.config, name))
+        return storageclass(self.config.envdir, config=bconf)
+
+class BDBMinimalStorage(BDBStorage):
+
+    def get_storageclass(self):
+        import BDBStorage.BDBMinimalStorage
+        return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
+
+class BDBFullStorage(BDBStorage):
+
+    def get_storageclass(self):
+        import BDBStorage.BDBFullStorage
+        return BDBStorage.BDBFullStorage.BDBFullStorage
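
For reference, a minimal sketch of the helper API above, mirroring the
pattern used in the test suite below (a mappingstorage keeps the
example self-contained):

    from ZODB.config import databaseFromString

    db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
    conn = db.open()
    root = conn.root()
    conn.close(); db.close()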

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/ZODB/tests/testConnection.py	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,656 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Unit tests for the Connection class."""
-
-from zope.testing import doctest
-import unittest
-import warnings
-
-from persistent import Persistent
-import transaction
-from ZODB.config import databaseFromString
-from ZODB.utils import p64, u64
-from ZODB.tests.warnhook import WarningsHook
-from zope.interface.verify import verifyObject
-
-class ConnectionDotAdd(unittest.TestCase):
-
-    def setUp(self):
-        from ZODB.Connection import Connection
-        self.db = StubDatabase()
-        self.datamgr = Connection(self.db)
-        self.datamgr.open()
-        self.transaction = StubTransaction()
-
-    def tearDown(self):
-        transaction.abort()
-
-    def check_add(self):
-        from ZODB.POSException import InvalidObjectReference
-        obj = StubObject()
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.datamgr.add(obj)
-        self.assert_(obj._p_oid is not None)
-        self.assert_(obj._p_jar is self.datamgr)
-        self.assert_(self.datamgr.get(obj._p_oid) is obj)
-
-        # Only first-class persistent objects may be added.
-        self.assertRaises(TypeError, self.datamgr.add, object())
-
-        # Adding to the same connection does not fail. Object keeps the
-        # same oid.
-        oid = obj._p_oid
-        self.datamgr.add(obj)
-        self.assertEqual(obj._p_oid, oid)
-
-        # Cannot add an object from a different connection.
-        obj2 = StubObject()
-        obj2._p_jar = object()
-        self.assertRaises(InvalidObjectReference, self.datamgr.add, obj2)
-
-    def checkResetOnAbort(self):
-        # Check that _p_oid and _p_jar are reset when a transaction is
-        # aborted.
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-
-    def checkResetOnTpcAbort(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-
-        # Simulate an error while committing some other object.
-
-        self.datamgr.tpc_begin(self.transaction)
-        # Let's pretend something bad happens here.
-        # Call tpc_abort, clearing everything.
-        self.datamgr.tpc_abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-
-    def checkTpcAbortAfterCommit(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        # Let's pretend something bad happened here.
-        self.datamgr.tpc_abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-        self.assertEquals(self.db._storage._stored, [oid])
-
-    def checkCommit(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        self.assert_(obj._p_oid is oid)
-        self.assert_(obj._p_jar is self.datamgr)
-
-        # This next assert_ is covered by an assert in tpc_finish.
-        ##self.assert_(not self.datamgr._added)
-
-        self.assertEquals(self.db._storage._stored, [oid])
-        self.assertEquals(self.db._storage._finished, [oid])
-
-    def checkModifyOnGetstate(self):
-        member = StubObject()
-        subobj = StubObject()
-        subobj.member = member
-        obj = ModifyOnGetStateObject(subobj)
-        self.datamgr.add(obj)
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        storage = self.db._storage
-        self.assert_(obj._p_oid in storage._stored, "object was not stored")
-        self.assert_(subobj._p_oid in storage._stored,
-                "subobject was not stored")
-        self.assert_(member._p_oid in storage._stored, "member was not stored")
-        self.assert_(self.datamgr._added_during_commit is None)
-
-    def checkUnusedAddWorks(self):
-        # When an object is added, but not committed, it shouldn't be stored,
-        # but also it should be an error.
-        obj = StubObject()
-        self.datamgr.add(obj)
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        self.assert_(obj._p_oid not in self.datamgr._storage._stored)
-
-class UserMethodTests(unittest.TestCase):
-
-    # add isn't tested here, because there are a bunch of traditional
-    # unit tests for it.
-
-    # The version tests would require a storage that supports versions,
-    # which is a bit more work.
-
-    def test_root(self):
-        r"""doctest of root() method
-
-        The root() method is simple, and the tests are pretty minimal.
-        Ensure that a new database has a root and that it is a
-        PersistentMapping.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> root = cn.root()
-        >>> type(root).__name__
-        'PersistentMapping'
-        >>> root._p_oid
-        '\x00\x00\x00\x00\x00\x00\x00\x00'
-        >>> root._p_jar is cn
-        True
-        >>> db.close()
-        """
-
-    def test_get(self):
-        r"""doctest of get() method
-
-        The get() method returns the persistent object corresponding to
-        an oid.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> obj = cn.get(p64(0))
-        >>> obj._p_oid
-        '\x00\x00\x00\x00\x00\x00\x00\x00'
-
-        The object is a ghost.
-
-        >>> obj._p_state
-        -1
-
-        And multiple calls with the same oid return the same object.
-
-        >>> obj2 = cn.get(p64(0))
-        >>> obj is obj2
-        True
-
-        If all references to the object are released, then a new
-        object will be returned. The cache doesn't keep unreferenced
-        ghosts alive.  (The next object returned may still have the
-        same id, because Python may re-use the same memory.)
-
-        >>> del obj, obj2
-        >>> cn._cache.get(p64(0), None)
-
-        If the object is unghosted, then it will stay in the cache
-        after the last reference is released.  (This is true only if
-        there is room in the cache and the object is recently used.)
-
-        >>> obj = cn.get(p64(0))
-        >>> obj._p_activate()
-        >>> y = id(obj)
-        >>> del obj
-        >>> obj = cn.get(p64(0))
-        >>> id(obj) == y
-        True
-        >>> obj._p_state
-        0
-
-        A request for an object that doesn't exist will raise a KeyError.
-
-        >>> cn.get(p64(1))
-        Traceback (most recent call last):
-          ...
-        KeyError: '\x00\x00\x00\x00\x00\x00\x00\x01'
-        """
-
-    def test_close(self):
-        r"""doctest of close() method
-
-        This is a minimal test, because most of the interesting
-        effects on closing a connection involve its interaction with the
-        database and the transaction.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-
-        It's safe to close a connection multiple times.
-        >>> cn.close()
-        >>> cn.close()
-        >>> cn.close()
-
-        It's not possible to load or store objects once the connection is closed.
-
-        >>> cn.get(p64(0))
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-        >>> p = Persistent()
-        >>> cn.add(p)
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-        """
-
-    def test_close_with_pending_changes(self):
-        r"""doctest to ensure close() w/ pending changes complains
-
-        >>> import transaction
-
-        Just opening and closing is fine.
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.close()
-
-        Opening, making a change, committing, and closing is fine.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 1
-        >>> transaction.commit()
-        >>> cn.close()
-
-        Opening, making a change, and aborting is fine.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 1
-        >>> transaction.abort()
-        >>> cn.close()
-
-        But trying to close with a change pending complains.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 10
-        >>> cn.close()
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: Cannot close a connection joined to a transaction
-
-        This leaves the connection as it was, so we can still commit
-        the change.
-        >>> transaction.commit()
-        >>> cn2 = db.open()
-        >>> cn2.root()['a']
-        10
-        >>> cn.close(); cn2.close()
-
-        >>> db.close()
-        """
-
-    def test_onCloseCallbacks(self):
-        r"""doctest of onCloseCallback() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-
-        Every function registered is called, even if it raises an
-        exception.  They are only called once.
-
-        >>> L = []
-        >>> def f():
-        ...     L.append("f")
-        >>> def g():
-        ...     L.append("g")
-        ...     return 1 / 0
-        >>> cn.onCloseCallback(g)
-        >>> cn.onCloseCallback(f)
-        >>> cn.close()
-        >>> L
-        ['g', 'f']
-        >>> del L[:]
-        >>> cn.close()
-        >>> L
-        []
-
-        The implementation keeps a list of callbacks that is reset
-        to a class variable (which is bound to None) after the connection
-        is closed.
-
-        >>> cn._Connection__onCloseCallbacks
-        """
-
-    def test_db(self):
-        r"""doctest of db() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.db() is db
-        True
-        >>> cn.close()
-        >>> cn.db() is db
-        True
-        """
-
-    def test_isReadOnly(self):
-        r"""doctest of isReadOnly() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.isReadOnly()
-        False
-        >>> cn.close()
-        >>> cn.isReadOnly()
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-
-        An expedient way to create a read-only storage:
-
-        >>> db._storage._is_read_only = True
-        >>> cn = db.open()
-        >>> cn.isReadOnly()
-        True
-        """
-
-    def test_cache(self):
-        r"""doctest of cacheMinimize().
-
-        This test is minimal, just verifying that the method can be called
-        and has some effect.  We need other tests that verify the cache works
-        as intended.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> r = cn.root()
-        >>> cn.cacheMinimize()
-        >>> r._p_state
-        -1
-
-        >>> r._p_activate()
-        >>> r._p_state  # up to date
-        0
-        >>> cn.cacheMinimize()
-        >>> r._p_state  # ghost again
-        -1
-        """
-
-class InvalidationTests(unittest.TestCase):
-
-    # It's harder to write serious tests, because some of the critical
-    # correctness issues relate to concurrency.  We'll have to depend
-    # on the various concurrent updates and NZODBThreads tests to
-    # handle these.
-
-    def test_invalidate(self):
-        r"""
-
-        This test initializes the database with several persistent
-        objects, then manually delivers invalidations and verifies that
-        they have the expected effect.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> p1 = Persistent()
-        >>> p2 = Persistent()
-        >>> p3 = Persistent()
-        >>> r = cn.root()
-        >>> r.update(dict(p1=p1, p2=p2, p3=p3))
-        >>> transaction.commit()
-
-        Transaction ids are 8-byte strings, just like oids; p64() will
-        create one from an int.
-
-        >>> cn.invalidate(p64(1), {p1._p_oid: 1})
-        >>> cn._txn_time
-        '\x00\x00\x00\x00\x00\x00\x00\x01'
-        >>> p1._p_oid in cn._invalidated
-        True
-        >>> p2._p_oid in cn._invalidated
-        False
-
-        >>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
-        >>> cn._txn_time
-        '\x00\x00\x00\x00\x00\x00\x00\x01'
-        >>> p1._p_oid in cn._invalidated
-        True
-        >>> p2._p_oid in cn._invalidated
-        True
-
-        Calling invalidate() doesn't affect the object state until
-        a transaction boundary.
-
-        >>> p1._p_state
-        0
-        >>> p2._p_state
-        0
-        >>> p3._p_state
-        0
-
-        The sync() method will abort the current transaction and
-        process any pending invalidations.
-
-        >>> cn.sync()
-        >>> p1._p_state
-        -1
-        >>> p2._p_state
-        -1
-        >>> p3._p_state
-        0
-        >>> cn._invalidated
-        set([])
-
-        """
-
-def test_invalidateCache():
-    """The invalidateCache method invalidates a connection's cache.  It also
-    prevents reads until the end of a transaction::
-
-        >>> from ZODB.tests.util import DB
-        >>> import transaction
-        >>> db = DB()
-        >>> tm = transaction.TransactionManager()
-        >>> connection = db.open(transaction_manager=tm)
-        >>> connection.root()['a'] = StubObject()
-        >>> connection.root()['a'].x = 1
-        >>> connection.root()['b'] = StubObject()
-        >>> connection.root()['b'].x = 1
-        >>> connection.root()['c'] = StubObject()
-        >>> connection.root()['c'].x = 1
-        >>> tm.commit()
-        >>> connection.root()['b']._p_deactivate()
-        >>> connection.root()['c'].x = 2
-
-    So we have a connection and an active transaction with some modifications.
-    Let's call invalidateCache:
-
-        >>> connection.invalidateCache()
-
-    Now, if we try to load an object, we'll get a read conflict:
-
-        >>> connection.root()['b'].x
-        Traceback (most recent call last):
-        ...
-        ReadConflictError: database read conflict error
-
-    If we try to commit the transaction, we'll get a conflict error:
-
-        >>> tm.commit()
-        Traceback (most recent call last):
-        ...
-        ConflictError: database conflict error
-
-    and the cache will have been cleared:
-
-        >>> print connection.root()['a']._p_changed
-        None
-        >>> print connection.root()['b']._p_changed
-        None
-        >>> print connection.root()['c']._p_changed
-        None
-
-    But we'll be able to access data again:
-
-        >>> connection.root()['b'].x
-        1
-
-    Aborting a transaction after a read conflict also lets us read data and go
-    on about our business:
-
-        >>> connection.invalidateCache()
-
-        >>> connection.root()['c'].x
-        Traceback (most recent call last):
-        ...
-        ReadConflictError: database read conflict error
-
-        >>> tm.abort()
-        >>> connection.root()['c'].x
-        1
-
-        >>> connection.root()['c'].x = 2
-        >>> tm.commit()
-
-        >>> db.close()
-    """
-
-# ---- stubs
-
-class StubObject(Persistent):
-    pass
-
-class StubTransaction:
-    pass
-
-class ErrorOnGetstateException(Exception):
-    pass
-
-class ErrorOnGetstateObject(Persistent):
-
-    def __getstate__(self):
-        raise ErrorOnGetstateException
-
-class ModifyOnGetStateObject(Persistent):
-
-    def __init__(self, p):
-        self._v_p = p
-
-    def __getstate__(self):
-        self._p_jar.add(self._v_p)
-        self.p = self._v_p
-        return Persistent.__getstate__(self)
-
-
-class StubStorage:
-    """Very simple in-memory storage that does *just* enough to support tests.
-
-    Only one concurrent transaction is supported.
-    Voting is not supported.
-    Versions are not supported.
-
-    Inspect self._stored and self._finished to see how the storage has been
-    used during a unit test. Whenever an object is stored in the store()
-    method, its oid is appended to self._stored. When a transaction is
-    finished, the oids that have been stored during the transaction are
-    appended to self._finished.
-    """
-
-    # internal
-    _oid = 1
-    _transaction = None
-
-    def __init__(self):
-        # internal
-        self._stored = []
-        self._finished = []
-        self._data = {}
-        self._transdata = {}
-        self._transstored = []
-
-    def new_oid(self):
-        oid = str(self._oid)
-        self._oid += 1
-        return oid
-
-    def sortKey(self):
-        return 'StubStorage sortKey'
-
-    def tpc_begin(self, transaction):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction is None:
-            self._transaction = transaction
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-
-    def tpc_abort(self, transaction):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        del self._transaction
-        self._transdata.clear()
-
-    def tpc_finish(self, transaction, callback):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        self._finished.extend(self._transstored)
-        self._data.update(self._transdata)
-        callback(transaction)
-        del self._transaction
-        self._transdata.clear()
-        self._transstored = []
-
-    def load(self, oid, version):
-        if version != '':
-            raise TypeError('StubStorage does not support versions.')
-        return self._data[oid]
-
-    def store(self, oid, serial, p, version, transaction):
-        if version != '':
-            raise TypeError('StubStorage does not support versions.')
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        self._stored.append(oid)
-        self._transstored.append(oid)
-        self._transdata[oid] = (p, serial)
-        # Explicitly returning None, as we're not pretending to be a ZEO
-        # storage
-        return None
-
-
-class TestConnectionInterface(unittest.TestCase):
-
-    def test_connection_interface(self):
-        from ZODB.interfaces import IConnection
-        db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        cn = db.open()
-        verifyObject(IConnection, cn)
-
-
-class StubDatabase:
-
-    def __init__(self):
-        self._storage = StubStorage()
-
-    classFactory = None
-    database_name = 'stubdatabase'
-    databases = {'stubdatabase': database_name}
-
-    def invalidate(self, transaction, dict_with_oid_keys, connection):
-        pass
-
-def test_suite():
-    s = unittest.makeSuite(ConnectionDotAdd, 'check')
-    s.addTest(doctest.DocTestSuite())
-    s.addTest(unittest.makeSuite(TestConnectionInterface))
-    return s

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/ZODB/tests/testConnection.py)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/ZODB/tests/testConnection.py	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,762 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Unit tests for the Connection class."""
+
+from zope.testing import doctest
+import unittest
+import warnings
+
+from persistent import Persistent
+import transaction
+from ZODB.config import databaseFromString
+from ZODB.utils import p64, u64
+from ZODB.tests.warnhook import WarningsHook
+from zope.interface.verify import verifyObject
+
+class ConnectionDotAdd(unittest.TestCase):
+
+    def setUp(self):
+        from ZODB.Connection import Connection
+        self.db = StubDatabase()
+        self.datamgr = Connection(self.db)
+        self.datamgr.open()
+        self.transaction = StubTransaction()
+
+    def tearDown(self):
+        transaction.abort()
+
+    def check_add(self):
+        from ZODB.POSException import InvalidObjectReference
+        obj = StubObject()
+        self.assert_(obj._p_oid is None)
+        self.assert_(obj._p_jar is None)
+        self.datamgr.add(obj)
+        self.assert_(obj._p_oid is not None)
+        self.assert_(obj._p_jar is self.datamgr)
+        self.assert_(self.datamgr.get(obj._p_oid) is obj)
+
+        # Only first-class persistent objects may be added.
+        self.assertRaises(TypeError, self.datamgr.add, object())
+
+        # Adding to the same connection does not fail. Object keeps the
+        # same oid.
+        oid = obj._p_oid
+        self.datamgr.add(obj)
+        self.assertEqual(obj._p_oid, oid)
+
+        # Cannot add an object from a different connection.
+        obj2 = StubObject()
+        obj2._p_jar = object()
+        self.assertRaises(InvalidObjectReference, self.datamgr.add, obj2)
+
+    def checkResetOnAbort(self):
+        # Check that _p_oid and _p_jar are reset when a transaction is
+        # aborted.
+        obj = StubObject()
+        self.datamgr.add(obj)
+        oid = obj._p_oid
+        self.datamgr.abort(self.transaction)
+        self.assert_(obj._p_oid is None)
+        self.assert_(obj._p_jar is None)
+        self.assertRaises(KeyError, self.datamgr.get, oid)
+
+    def checkResetOnTpcAbort(self):
+        obj = StubObject()
+        self.datamgr.add(obj)
+        oid = obj._p_oid
+
+        # Simulate an error while committing some other object.
+
+        self.datamgr.tpc_begin(self.transaction)
+        # Let's pretend something bad happens here.
+        # Call tpc_abort, clearing everything.
+        self.datamgr.tpc_abort(self.transaction)
+        self.assert_(obj._p_oid is None)
+        self.assert_(obj._p_jar is None)
+        self.assertRaises(KeyError, self.datamgr.get, oid)
+
+    def checkTpcAbortAfterCommit(self):
+        obj = StubObject()
+        self.datamgr.add(obj)
+        oid = obj._p_oid
+        self.datamgr.tpc_begin(self.transaction)
+        self.datamgr.commit(self.transaction)
+        # Let's pretend something bad happened here.
+        self.datamgr.tpc_abort(self.transaction)
+        self.assert_(obj._p_oid is None)
+        self.assert_(obj._p_jar is None)
+        self.assertRaises(KeyError, self.datamgr.get, oid)
+        self.assertEquals(self.db._storage._stored, [oid])
+
+    def checkCommit(self):
+        obj = StubObject()
+        self.datamgr.add(obj)
+        oid = obj._p_oid
+        self.datamgr.tpc_begin(self.transaction)
+        self.datamgr.commit(self.transaction)
+        self.datamgr.tpc_finish(self.transaction)
+        self.assert_(obj._p_oid is oid)
+        self.assert_(obj._p_jar is self.datamgr)
+
+        # This next assert_ is covered by an assert in tpc_finish.
+        ##self.assert_(not self.datamgr._added)
+
+        self.assertEquals(self.db._storage._stored, [oid])
+        self.assertEquals(self.db._storage._finished, [oid])
+
+    def checkModifyOnGetstate(self):
+        member = StubObject()
+        subobj = StubObject()
+        subobj.member = member
+        obj = ModifyOnGetStateObject(subobj)
+        self.datamgr.add(obj)
+        self.datamgr.tpc_begin(self.transaction)
+        self.datamgr.commit(self.transaction)
+        self.datamgr.tpc_finish(self.transaction)
+        storage = self.db._storage
+        self.assert_(obj._p_oid in storage._stored, "object was not stored")
+        self.assert_(subobj._p_oid in storage._stored,
+                "subobject was not stored")
+        self.assert_(member._p_oid in storage._stored, "member was not stored")
+        self.assert_(self.datamgr._added_during_commit is None)
+
+    def checkUnusedAddWorks(self):
+        # When an object is added, but not committed, it shouldn't be stored,
+        # but it also shouldn't be an error.
+        obj = StubObject()
+        self.datamgr.add(obj)
+        self.datamgr.tpc_begin(self.transaction)
+        self.datamgr.tpc_finish(self.transaction)
+        self.assert_(obj._p_oid not in self.datamgr._storage._stored)
+
+class UserMethodTests(unittest.TestCase):
+
+    # add isn't tested here, because there are a bunch of traditional
+    # unit tests for it.
+
+    # The version tests would require a storage that supports versions,
+    # which is a bit more work.
+
+    def test_root(self):
+        r"""doctest of root() method
+
+        The root() method is simple, and the tests are pretty minimal.
+        Ensure that a new database has a root and that it is a
+        PersistentMapping.
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> root = cn.root()
+        >>> type(root).__name__
+        'PersistentMapping'
+        >>> root._p_oid
+        '\x00\x00\x00\x00\x00\x00\x00\x00'
+        >>> root._p_jar is cn
+        True
+        >>> db.close()
+        """
+
+    def test_get(self):
+        r"""doctest of get() method
+
+        The get() method returns the persistent object corresponding to
+        an oid.
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> obj = cn.get(p64(0))
+        >>> obj._p_oid
+        '\x00\x00\x00\x00\x00\x00\x00\x00'
+
+        The object is a ghost.
+
+        >>> obj._p_state
+        -1
+
+        And multiple calls with the same oid return the same object.
+
+        >>> obj2 = cn.get(p64(0))
+        >>> obj is obj2
+        True
+
+        If all references to the object are released, then a new
+        object will be returned. The cache doesn't keep unreferenced
+        ghosts alive.  (The next object returned may still have the
+        same id, because Python may re-use the same memory.)
+
+        >>> del obj, obj2
+        >>> cn._cache.get(p64(0), None)
+
+        If the object is unghosted, then it will stay in the cache
+        after the last reference is released.  (This is true only if
+        there is room in the cache and the object is recently used.)
+
+        >>> obj = cn.get(p64(0))
+        >>> obj._p_activate()
+        >>> y = id(obj)
+        >>> del obj
+        >>> obj = cn.get(p64(0))
+        >>> id(obj) == y
+        True
+        >>> obj._p_state
+        0
+
+        A request for an object that doesn't exist will raise a KeyError.
+
+        >>> cn.get(p64(1))
+        Traceback (most recent call last):
+          ...
+        KeyError: '\x00\x00\x00\x00\x00\x00\x00\x01'
+        """
+
+    def test_close(self):
+        r"""doctest of close() method
+
+        This is a minimal test, because most of the interesting
+        effects of closing a connection involve its interaction with the
+        database and the transaction.
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+
+        It's safe to close a connection multiple times.
+        >>> cn.close()
+        >>> cn.close()
+        >>> cn.close()
+
+        It's not possible to load or store objects once the connection is
+        closed.
+
+        >>> cn.get(p64(0))
+        Traceback (most recent call last):
+          ...
+        ConnectionStateError: The database connection is closed
+        >>> p = Persistent()
+        >>> cn.add(p)
+        Traceback (most recent call last):
+          ...
+        ConnectionStateError: The database connection is closed
+        """
+
+    def test_close_with_pending_changes(self):
+        r"""doctest to ensure close() w/ pending changes complains
+
+        >>> import transaction
+
+        Just opening and closing is fine.
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> cn.close()
+
+        Opening, making a change, committing, and closing is fine.
+        >>> cn = db.open()
+        >>> cn.root()['a'] = 1
+        >>> transaction.commit()
+        >>> cn.close()
+
+        Opening, making a change, and aborting is fine.
+        >>> cn = db.open()
+        >>> cn.root()['a'] = 1
+        >>> transaction.abort()
+        >>> cn.close()
+
+        But trying to close with a change pending complains.
+        >>> cn = db.open()
+        >>> cn.root()['a'] = 10
+        >>> cn.close()
+        Traceback (most recent call last):
+          ...
+        ConnectionStateError: Cannot close a connection joined to a transaction
+
+        This leaves the connection as it was, so we can still commit
+        the change.
+        >>> transaction.commit()
+        >>> cn2 = db.open()
+        >>> cn2.root()['a']
+        10
+        >>> cn.close(); cn2.close()
+
+        >>> db.close()
+        """
+
+    def test_onCloseCallbacks(self):
+        r"""doctest of onCloseCallback() method
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+
+        Every function registered is called, even if it raises an
+        exception.  They are only called once.
+
+        >>> L = []
+        >>> def f():
+        ...     L.append("f")
+        >>> def g():
+        ...     L.append("g")
+        ...     return 1 / 0
+        >>> cn.onCloseCallback(g)
+        >>> cn.onCloseCallback(f)
+        >>> cn.close()
+        >>> L
+        ['g', 'f']
+        >>> del L[:]
+        >>> cn.close()
+        >>> L
+        []
+
+        The implementation keeps a list of callbacks that is reset
+        to a class variable (which is bound to None) after the connection
+        is closed.
+
+        >>> cn._Connection__onCloseCallbacks
+        """
+
+    def test_db(self):
+        r"""doctest of db() method
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> cn.db() is db
+        True
+        >>> cn.close()
+        >>> cn.db() is db
+        True
+        """
+
+    def test_isReadOnly(self):
+        r"""doctest of isReadOnly() method
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> cn.isReadOnly()
+        False
+        >>> cn.close()
+        >>> cn.isReadOnly()
+        Traceback (most recent call last):
+          ...
+        ConnectionStateError: The database connection is closed
+
+        An expedient way to create a read-only storage:
+
+        >>> db._storage._is_read_only = True
+        >>> cn = db.open()
+        >>> cn.isReadOnly()
+        True
+        """
+
+    def test_cache(self):
+        r"""doctest of cacheMinimize().
+
+        This test is minimal, just verifying that the method can be called
+        and has some effect.  We need other tests that verify the cache works
+        as intended.
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> r = cn.root()
+        >>> cn.cacheMinimize()
+        >>> r._p_state
+        -1
+
+        >>> r._p_activate()
+        >>> r._p_state  # up to date
+        0
+        >>> cn.cacheMinimize()
+        >>> r._p_state  # ghost again
+        -1
+        """
+
+class InvalidationTests(unittest.TestCase):
+
+    # It's harder to write serious tests, because some of the critical
+    # correctness issues relate to concurrency.  We'll have to depend
+    # on the various concurrent updates and NZODBThreads tests to
+    # handle these.
+
+    def test_invalidate(self):
+        r"""
+
+        This test initializes the database with several persistent
+        objects, then manually delivers invalidations and verifies that
+        they have the expected effect.
+
+        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        >>> cn = db.open()
+        >>> p1 = Persistent()
+        >>> p2 = Persistent()
+        >>> p3 = Persistent()
+        >>> r = cn.root()
+        >>> r.update(dict(p1=p1, p2=p2, p3=p3))
+        >>> transaction.commit()
+
+        Transaction ids are 8-byte strings, just like oids; p64() will
+        create one from an int.
+
+        >>> cn.invalidate(p64(1), {p1._p_oid: 1})
+        >>> cn._txn_time
+        '\x00\x00\x00\x00\x00\x00\x00\x01'
+        >>> p1._p_oid in cn._invalidated
+        True
+        >>> p2._p_oid in cn._invalidated
+        False
+
+        >>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
+        >>> cn._txn_time
+        '\x00\x00\x00\x00\x00\x00\x00\x01'
+        >>> p1._p_oid in cn._invalidated
+        True
+        >>> p2._p_oid in cn._invalidated
+        True
+
+        Calling invalidate() doesn't affect the object state until
+        a transaction boundary.
+
+        >>> p1._p_state
+        0
+        >>> p2._p_state
+        0
+        >>> p3._p_state
+        0
+
+        The sync() method will abort the current transaction and
+        process any pending invalidations.
+
+        >>> cn.sync()
+        >>> p1._p_state
+        -1
+        >>> p2._p_state
+        -1
+        >>> p3._p_state
+        0
+        >>> cn._invalidated
+        set([])
+
+        """
+
+def test_invalidateCache():
+    """The invalidateCache method invalidates a connection's cache.  It also
+    prevents reads until the end of a transaction::
+
+        >>> from ZODB.tests.util import DB
+        >>> import transaction
+        >>> db = DB()
+        >>> tm = transaction.TransactionManager()
+        >>> connection = db.open(transaction_manager=tm)
+        >>> connection.root()['a'] = StubObject()
+        >>> connection.root()['a'].x = 1
+        >>> connection.root()['b'] = StubObject()
+        >>> connection.root()['b'].x = 1
+        >>> connection.root()['c'] = StubObject()
+        >>> connection.root()['c'].x = 1
+        >>> tm.commit()
+        >>> connection.root()['b']._p_deactivate()
+        >>> connection.root()['c'].x = 2
+
+    So we have a connection and an active transaction with some modifications.
+    Let's call invalidateCache:
+
+        >>> connection.invalidateCache()
+
+    Now, if we try to load an object, we'll get a read conflict:
+
+        >>> connection.root()['b'].x
+        Traceback (most recent call last):
+        ...
+        ReadConflictError: database read conflict error
+
+    If we try to commit the transaction, we'll get a conflict error:
+
+        >>> tm.commit()
+        Traceback (most recent call last):
+        ...
+        ConflictError: database conflict error
+
+    and the cache will have been cleared:
+
+        >>> print connection.root()['a']._p_changed
+        None
+        >>> print connection.root()['b']._p_changed
+        None
+        >>> print connection.root()['c']._p_changed
+        None
+
+    But we'll be able to access data again:
+
+        >>> connection.root()['b'].x
+        1
+
+    Aborting a transaction after a read conflict also lets us read data and go
+    on about our business:
+
+        >>> connection.invalidateCache()
+
+        >>> connection.root()['c'].x
+        Traceback (most recent call last):
+        ...
+        ReadConflictError: database read conflict error
+
+        >>> tm.abort()
+        >>> connection.root()['c'].x
+        1
+
+        >>> connection.root()['c'].x = 2
+        >>> tm.commit()
+
+        >>> db.close()
+    """
+
+class _PlayPersistent(Persistent):
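+    # Toy persistent class whose state is a string of `size` spaces, so the
+    # size of its pickle (and with it _p_estimated_size) tracks the value
+    # passed to setValueWithSize().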
+    def setValueWithSize(self, size=0): self.value = size*' '
+    __init__ = setValueWithSize
+
+class EstimatedSizeTests(unittest.TestCase):
+    """check that size estimations are handled correctly."""
+
+    def setUp(self):
+        self.db = db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        self.conn = c = db.open()
+        self.obj = obj = _PlayPersistent()
+        c.root()['obj'] = obj
+        transaction.commit()
+
+    def test_size_set_on_write_commit(self):
+        obj, cache = self.obj, self.conn._cache
+        # we have just written "obj". Its size should not be zero
+        size, cache_size = obj._p_estimated_size, cache.total_estimated_size
+        self.assert_(size > 0)
+        self.assert_(cache_size > size)
+        # increase the size, write again and check that the size changed
+        obj.setValueWithSize(1000)
+        transaction.commit()
+        new_size = obj._p_estimated_size
+        self.assert_(new_size > size)
+        self.assertEqual(cache.total_estimated_size, cache_size + new_size - size)
+
+    def test_size_set_on_write_savepoint(self):
+        obj, cache = self.obj, self.conn._cache
+        # we have just written "obj". Its size should not be zero
+        size, cache_size = obj._p_estimated_size, cache.total_estimated_size
+        # increase the size, write again and check that the size changed
+        obj.setValueWithSize(1000)
+        transaction.savepoint()
+        new_size = obj._p_estimated_size
+        self.assert_(new_size > size)
+        self.assertEqual(cache.total_estimated_size, cache_size + new_size - size)
+
+    def test_size_set_on_load(self):
+        c = self.db.open() # new connection
+        obj = c.root()['obj']
+        # the object is still a ghost and '_p_estimated_size' is not yet set;
+        # the attribute access below unghosts it
+        cache = c._cache
+        cache_size = cache.total_estimated_size
+        obj.value
+        size = obj._p_estimated_size
+        self.assert_(size > 0)
+        self.assertEqual(cache.total_estimated_size, cache_size + size)
+        # we also verify here that deactivation reduces the cache size again
+        obj._p_deactivate()
+        self.assertEqual(cache.total_estimated_size, cache_size)
+
+
+    def test_configuration(self):
+        # verify defaults ....
+        expected = 0
+        # ... on db
+        db = self.db
+        self.assertEqual(db.getCacheSizeBytes(), expected)
+        # ... on connection
+        conn = self.conn
+        self.assertEqual(conn._cache.cache_size_bytes, expected)
+        # verify explicit setting ...
+        expected = 10000
+        # ... on db
+        db = databaseFromString("<zodb>\n"
+                                "  cache-size-bytes %d\n"
+                                "  <mappingstorage />\n"
+                                "</zodb>"
+                                % expected
+                                )
+        self.assertEqual(db.getCacheSizeBytes(), expected)
+        # ... on connection
+        conn = db.open()
+        self.assertEqual(conn._cache.cache_size_bytes, expected)
+        # test a huge size limit (too large to fit in 4 bytes)
+        db = databaseFromString("<zodb>\n"
+                                "  cache-size-bytes 8GB\n"
+                                "  <mappingstorage />\n"
+                                "</zodb>"
+                                )
+        self.assertEqual(db.getCacheSizeBytes(), 0x1L << 33)
+
+
+    def test_cache_garbage_collection(self):
+        db = self.db
+        # activate size based cache garbage collection
+        db.setCacheSizeBytes(1)
+        conn = self.conn
+        cache = conn._cache
+        # verify the change worked as expected
+        self.assertEqual(cache.cache_size_bytes, 1)
+        # verify our entrance assumption is fulfilled
+        self.assert_(cache.total_estimated_size > 1)
+        conn.cacheGC()
+        self.assert_(cache.total_estimated_size <= 1)
+        # sanity check
+        self.assert_(cache.total_estimated_size >= 0)
+
+
+# ---- stubs
+
+class StubObject(Persistent):
+    pass
+
+class StubTransaction:
+    pass
+
+class ErrorOnGetstateException(Exception):
+    pass
+
+class ErrorOnGetstateObject(Persistent):
+
+    def __getstate__(self):
+        raise ErrorOnGetstateException
+
+class ModifyOnGetStateObject(Persistent):
+
+    def __init__(self, p):
+        self._v_p = p
+
+    def __getstate__(self):
+        self._p_jar.add(self._v_p)
+        self.p = self._v_p
+        return Persistent.__getstate__(self)
+
+
+class StubStorage:
+    """Very simple in-memory storage that does *just* enough to support tests.
+
+    Only one concurrent transaction is supported.
+    Voting is not supported.
+    Versions are not supported.
+
+    Inspect self._stored and self._finished to see how the storage has been
+    used during a unit test. Whenever an object is stored in the store()
+    method, its oid is appended to self._stored. When a transaction is
+    finished, the oids that have been stored during the transaction are
+    appended to self._finished.
+    """
+
+    # internal
+    _oid = 1
+    _transaction = None
+
+    def __init__(self):
+        # internal
+        self._stored = []
+        self._finished = []
+        self._data = {}
+        self._transdata = {}
+        self._transstored = []
+
+    def new_oid(self):
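+        # Hand out simple decimal-string oids; real storages use 8-byte
+        # strings (cf. p64 in the doctests above), but the stub only needs
+        # uniqueness.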
+        oid = str(self._oid)
+        self._oid += 1
+        return oid
+
+    def sortKey(self):
+        return 'StubStorage sortKey'
+
+    def tpc_begin(self, transaction):
+        if transaction is None:
+            raise TypeError('transaction may not be None')
+        elif self._transaction is None:
+            self._transaction = transaction
+        elif self._transaction != transaction:
+            raise RuntimeError(
+                'StubStorage uses only one transaction at a time')
+
+    def tpc_abort(self, transaction):
+        if transaction is None:
+            raise TypeError('transaction may not be None')
+        elif self._transaction != transaction:
+            raise RuntimeError(
+                'StubStorage uses only one transaction at a time')
+        del self._transaction
+        self._transdata.clear()
+
+    def tpc_finish(self, transaction, callback):
+        if transaction is None:
+            raise TypeError('transaction may not be None')
+        elif self._transaction != transaction:
+            raise RuntimeError(
+                'StubStorage uses only one transaction at a time')
+        self._finished.extend(self._transstored)
+        self._data.update(self._transdata)
+        callback(transaction)
+        del self._transaction
+        self._transdata.clear()
+        self._transstored = []
+
+    def load(self, oid, version):
+        if version != '':
+            raise TypeError('StubStorage does not support versions.')
+        return self._data[oid]
+
+    def store(self, oid, serial, p, version, transaction):
+        if version != '':
+            raise TypeError('StubStorage does not support versions.')
+        if transaction is None:
+            raise TypeError('transaction may not be None')
+        elif self._transaction != transaction:
+            raise RuntimeError(
+                'StubStorage uses only one transaction at a time')
+        self._stored.append(oid)
+        self._transstored.append(oid)
+        self._transdata[oid] = (p, serial)
+        # Explicitly returning None, as we're not pretending to be a ZEO
+        # storage
+        return None
+
+
+class TestConnectionInterface(unittest.TestCase):
+
+    def test_connection_interface(self):
+        from ZODB.interfaces import IConnection
+        db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
+        cn = db.open()
+        verifyObject(IConnection, cn)
+
+
+class StubDatabase:
+
+    def __init__(self):
+        self._storage = StubStorage()
+
+    classFactory = None
+    database_name = 'stubdatabase'
+    databases = {'stubdatabase': database_name}
+
+    def invalidate(self, transaction, dict_with_oid_keys, connection):
+        pass
+
+def test_suite():
+    s = unittest.makeSuite(ConnectionDotAdd, 'check')
+    s.addTest(doctest.DocTestSuite())
+    s.addTest(unittest.makeSuite(TestConnectionInterface))
+    s.addTest(unittest.makeSuite(EstimatedSizeTests))
+    return s

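For readers tracing the tests above: the ConnectionDotAdd checks invoke the
connection's two-phase commit methods by hand against stubs. A minimal sketch
of the same flow against a real in-memory database (not part of this
revision; it reuses databaseFromString and the transaction package exactly as
the doctests do):

    import transaction
    from ZODB.config import databaseFromString

    db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
    cn = db.open()
    cn.root()['x'] = 1   # modifying an object joins cn to the transaction
    # transaction.commit() drives cn through roughly the sequence the checks
    # above perform manually: tpc_begin, commit, (tpc_vote,) tpc_finish.
    transaction.commit()
    db.close()
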
Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/persistent/cPersistence.c	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,1225 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-static char cPersistence_doc_string[] =
-"Defines Persistent mixin class for persistent objects.\n"
-"\n"
-"$Id$\n";
-
-#include "cPersistence.h"
-#include "structmember.h"
-
-struct ccobject_head_struct {
-    CACHE_HEAD
-};
-
-/* These two objects are initialized when the module is loaded */
-static PyObject *TimeStamp, *py_simple_new;
-
-/* Strings initialized by init_strings() below. */
-static PyObject *py_keys, *py_setstate, *py___dict__, *py_timeTime;
-static PyObject *py__p_changed, *py__p_deactivate;
-static PyObject *py___getattr__, *py___setattr__, *py___delattr__;
-static PyObject *py___slotnames__, *copy_reg_slotnames, *__newobj__;
-static PyObject *py___getnewargs__, *py___getstate__;
-
-
-static int
-init_strings(void)
-{
-#define INIT_STRING(S) \
-    if (!(py_ ## S = PyString_InternFromString(#S))) \
-	return -1;
-    INIT_STRING(keys);
-    INIT_STRING(setstate);
-    INIT_STRING(timeTime);
-    INIT_STRING(__dict__);
-    INIT_STRING(_p_changed);
-    INIT_STRING(_p_deactivate);
-    INIT_STRING(__getattr__);
-    INIT_STRING(__setattr__);
-    INIT_STRING(__delattr__);
-    INIT_STRING(__slotnames__);
-    INIT_STRING(__getnewargs__);
-    INIT_STRING(__getstate__);
-#undef INIT_STRING
-    return 0;
-}
-
-#ifdef Py_DEBUG
-static void
-fatal_1350(cPersistentObject *self, const char *caller, const char *detail)
-{
-	char buf[1000];
-
-	PyOS_snprintf(buf, sizeof(buf),
-	    "cPersistence.c %s(): object at %p with type %.200s\n"
-	    "%s.\n"
-	    "The only known cause is multiple threads trying to ghost and\n"
-	    "unghost the object simultaneously.\n"
-	    "That's not legal, but ZODB can't stop it.\n"
-	    "See Collector #1350.\n",
-	    caller, self, self->ob_type->tp_name, detail);
-	Py_FatalError(buf);
-}
-#endif
-
-static void ghostify(cPersistentObject*);
-
-/* Load the state of the object, unghostifying it.  Upon success, return 1.
- * If an error occurred, re-ghostify the object and return -1.
- */
-static int
-unghostify(cPersistentObject *self)
-{
-    if (self->state < 0 && self->jar) {
-        PyObject *r;
-
-        /* Is it ever possible to not have a cache? */
-        if (self->cache) {
-            /* Create a node in the ring for this unghostified object. */
-            self->cache->non_ghost_count++;
-	    ring_add(&self->cache->ring_home, &self->ring);
-	    Py_INCREF(self);
-        }
-	/* set state to CHANGED while setstate() call is in progress
-	   to prevent a recursive call to _PyPersist_Load().
-	*/
-        self->state = cPersistent_CHANGED_STATE;
-        /* Call the object's __setstate__() */
-	r = PyObject_CallMethod(self->jar, "setstate", "O", (PyObject *)self);
-        if (r == NULL) {
-            ghostify(self);
-            return -1;
-        }
-        self->state = cPersistent_UPTODATE_STATE;
-        Py_DECREF(r);
-        if (self->cache && self->ring.r_next == NULL) {
-#ifdef Py_DEBUG
-        	fatal_1350(self, "unghostify",
-		    		 "is not in the cache despite that we just "
-		      		 "unghostified it");
-#else
-		PyErr_Format(PyExc_SystemError, "object at %p with type "
-			     "%.200s not in the cache despite that we just "
-			     "unghostified it", self, self->ob_type->tp_name);
-		return -1;
-#endif
-	}
-    }
-    return 1;
-}
-
-/****************************************************************************/
-
-static PyTypeObject Pertype;
-
-static void
-accessed(cPersistentObject *self)
-{
-    /* Do nothing unless the object is in a cache and not a ghost. */
-    if (self->cache && self->state >= 0 && self->ring.r_next)
-	ring_move_to_head(&self->cache->ring_home, &self->ring);
-}
-
-static void
-unlink_from_ring(cPersistentObject *self)
-{
-    /* If the cache has been cleared, then a non-ghost object
-       isn't in the ring any longer.
-    */
-    if (self->ring.r_next == NULL)
-	return;
-
-    /* if we're ghostifying an object, we better have some non-ghosts */
-    assert(self->cache->non_ghost_count > 0);
-    self->cache->non_ghost_count--;
-    ring_del(&self->ring);
-}
-
-static void
-ghostify(cPersistentObject *self)
-{
-    PyObject **dictptr;
-
-    /* are we already a ghost? */
-    if (self->state == cPersistent_GHOST_STATE)
-        return;
-
-    /* Is it ever possible to not have a cache? */
-    if (self->cache == NULL) {
-        self->state = cPersistent_GHOST_STATE;
-        return;
-    }
-
-    if (self->ring.r_next == NULL) {
-	/* There's no way to raise an error in this routine. */
-#ifdef Py_DEBUG
-	fatal_1350(self, "ghostify", "claims to be in a cache but isn't");
-#else
-	return;
-#endif
-    }
-
-    /* If we're ghostifying an object, we better have some non-ghosts. */
-    assert(self->cache->non_ghost_count > 0);
-    self->cache->non_ghost_count--;
-    ring_del(&self->ring);
-    self->state = cPersistent_GHOST_STATE;
-    dictptr = _PyObject_GetDictPtr((PyObject *)self);
-    if (dictptr && *dictptr) {
-	Py_DECREF(*dictptr);
-	*dictptr = NULL;
-    }
-
-    /* We remove the reference to the just ghosted object that the ring
-     * holds.  Note that the dictionary of oids->objects has an uncounted
-     * reference, so if the ring's reference was the only one, this frees
-     * the ghost object.  Note further that the object's dealloc knows to
-     * inform the dictionary that it is going away.
-     */
-    Py_DECREF(self);
-}
-
-static int
-changed(cPersistentObject *self)
-{
-  if ((self->state == cPersistent_UPTODATE_STATE ||
-       self->state == cPersistent_STICKY_STATE)
-       && self->jar)
-    {
-	PyObject *meth, *arg, *result;
-	static PyObject *s_register;
-
-	if (s_register == NULL)
-	    s_register = PyString_InternFromString("register");
-	meth = PyObject_GetAttr((PyObject *)self->jar, s_register);
-	if (meth == NULL)
-	    return -1;
-	arg = PyTuple_New(1);
-	if (arg == NULL) {
-	    Py_DECREF(meth);
-	    return -1;
-	}
-	Py_INCREF(self);
-	PyTuple_SET_ITEM(arg, 0, (PyObject *)self);
-	result = PyEval_CallObject(meth, arg);
-	Py_DECREF(arg);
-	Py_DECREF(meth);
-	if (result == NULL)
-	    return -1;
-	Py_DECREF(result);
-
-	self->state = cPersistent_CHANGED_STATE;
-    }
-
-  return 0;
-}
-
-static PyObject *
-Per__p_deactivate(cPersistentObject *self)
-{
-    if (self->state == cPersistent_UPTODATE_STATE && self->jar) {
-	PyObject **dictptr = _PyObject_GetDictPtr((PyObject *)self);
-	if (dictptr && *dictptr) {
-	    Py_DECREF(*dictptr);
-	    *dictptr = NULL;
-	}
-	/* Note that we need to set to ghost state unless we are
-	   called directly. Methods that override this need to
-	   do the same! */
-	ghostify(self);
-    }
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static PyObject *
-Per__p_activate(cPersistentObject *self)
-{
-    if (unghostify(self) < 0)
-        return NULL;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static int Per_set_changed(cPersistentObject *self, PyObject *v);
-
-static PyObject *
-Per__p_invalidate(cPersistentObject *self)
-{
-    signed char old_state = self->state;
-
-    if (old_state != cPersistent_GHOST_STATE) {
-        if (Per_set_changed(self, NULL) < 0)
-            return NULL;
-        ghostify(self);
-    }
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-
-static PyObject *
-pickle_slotnames(PyTypeObject *cls)
-{
-    PyObject *slotnames;
-
-    slotnames = PyDict_GetItem(cls->tp_dict, py___slotnames__);
-    if (slotnames) {
-	Py_INCREF(slotnames);
-	return slotnames;
-    }
-
-    slotnames = PyObject_CallFunctionObjArgs(copy_reg_slotnames,
-					     (PyObject*)cls, NULL);
-    if (slotnames && !(slotnames == Py_None || PyList_Check(slotnames))) {
-	PyErr_SetString(PyExc_TypeError,
-			"copy_reg._slotnames didn't return a list or None");
-	Py_DECREF(slotnames);
-	return NULL;
-    }
-
-    return slotnames;
-}
-
-static PyObject *
-pickle_copy_dict(PyObject *state)
-{
-    PyObject *copy, *key, *value;
-    char *ckey;
-    Py_ssize_t pos = 0;
-
-    copy = PyDict_New();
-    if (!copy)
-	return NULL;
-
-    if (!state)
-	return copy;
-
-    while (PyDict_Next(state, &pos, &key, &value)) {
-	if (key && PyString_Check(key)) {
-	    ckey = PyString_AS_STRING(key);
-	    if (*ckey == '_' &&
-		(ckey[1] == 'v' || ckey[1] == 'p') &&
-		ckey[2] == '_')
-		/* skip volatile and persistent */
-		continue;
-        }
-
-	if (PyObject_SetItem(copy, key, value) < 0)
-	    goto err;
-    }
-
-    return copy;
- err:
-    Py_DECREF(copy);
-    return NULL;
-}
-
-
-static char pickle___getstate__doc[] =
-"Get the object serialization state\n"
-"\n"
-"If the object has no assigned slots and has no instance dictionary, then \n"
-"None is returned.\n"
-"\n"
-"If the object has no assigned slots and has an instance dictionary, then \n"
-"the a copy of the instance dictionary is returned. The copy has any items \n"
-"with names starting with '_v_' or '_p_' ommitted.\n"
-"\n"
-"If the object has assigned slots, then a two-element tuple is returned.  \n"
-"The first element is either None or a copy of the instance dictionary, \n"
-"as described above. The second element is a dictionary with items \n"
-"for each of the assigned slots.\n"
-;
-
-static PyObject *
-pickle___getstate__(PyObject *self)
-{
-    PyObject *slotnames=NULL, *slots=NULL, *state=NULL;
-    PyObject **dictp;
-    int n=0;
-
-    slotnames = pickle_slotnames(self->ob_type);
-    if (!slotnames)
-	return NULL;
-
-    dictp = _PyObject_GetDictPtr(self);
-    if (dictp)
-	state = pickle_copy_dict(*dictp);
-    else {
-	state = Py_None;
-	Py_INCREF(state);
-    }
-
-    if (slotnames != Py_None) {
-	int i;
-
-	slots = PyDict_New();
-	if (!slots)
-	    goto end;
-
-	for (i = 0; i < PyList_GET_SIZE(slotnames); i++) {
-	    PyObject *name, *value;
-	    char *cname;
-
-	    name = PyList_GET_ITEM(slotnames, i);
-	    if (PyString_Check(name)) {
-		cname = PyString_AS_STRING(name);
-		if (*cname == '_' &&
-		    (cname[1] == 'v' || cname[1] == 'p') &&
-		    cname[2] == '_')
-		    /* skip volatile and persistent */
-		    continue;
-            }
-
-	    /* Unclear:  Will this go through our getattr hook? */
-	    value = PyObject_GetAttr(self, name);
-	    if (value == NULL)
-		PyErr_Clear();
-	    else {
-		int err = PyDict_SetItem(slots, name, value);
-		Py_DECREF(value);
-		if (err < 0)
-		    goto end;
-		n++;
-            }
-        }
-    }
-
-    if (n)
-	state = Py_BuildValue("(NO)", state, slots);
-
- end:
-    Py_XDECREF(slotnames);
-    Py_XDECREF(slots);
-
-    return state;
-}
-
-static int
-pickle_setattrs_from_dict(PyObject *self, PyObject *dict)
-{
-    PyObject *key, *value;
-    Py_ssize_t pos = 0;
-
-    if (!PyDict_Check(dict)) {
-	PyErr_SetString(PyExc_TypeError, "Expected dictionary");
-	return -1;
-    }
-
-    while (PyDict_Next(dict, &pos, &key, &value)) {
-	if (PyObject_SetAttr(self, key, value) < 0)
-	    return -1;
-    }
-    return 0;
-}
-
-static char pickle___setstate__doc[] =
-"Set the object serialization state\n\n"
-"The state should be in one of 3 forms:\n\n"
-"- None\n\n"
-"  Ignored\n\n"
-"- A dictionary\n\n"
-"  In this case, the object's instance dictionary will be cleared and \n"
-"  updated with the new state.\n\n"
-"- A two-tuple with a string as the first element. \n\n"
-"  In this case, the method named by the string in the first element will be\n"
-"  called with the second element.\n\n"
-"  This form supports migration of data formats.\n\n"
-"- A two-tuple with None or a Dictionary as the first element and\n"
-"  with a dictionary as the second element.\n\n"
-"  If the first element is not None, then the object's instance dictionary \n"
-"  will be cleared and updated with the value.\n\n"
-"  The items in the second element will be assigned as attributes.\n"
-;
-
-static PyObject *
-pickle___setstate__(PyObject *self, PyObject *state)
-{
-    PyObject *slots=NULL;
-
-    if (PyTuple_Check(state)) {
-	if (!PyArg_ParseTuple(state, "OO:__setstate__", &state, &slots))
-	    return NULL;
-    }
-
-    if (state != Py_None) {
-	PyObject **dict;
-
-	dict = _PyObject_GetDictPtr(self);
-	if (dict) {
-	    if (!*dict) {
-		*dict = PyDict_New();
-		if (!*dict)
-		    return NULL;
-            }
-        }
-
-	if (*dict) {
-	    PyDict_Clear(*dict);
-	    if (PyDict_Update(*dict, state) < 0)
-		return NULL;
-        }
-	else if (pickle_setattrs_from_dict(self, state) < 0)
-	    return NULL;
-    }
-
-    if (slots && pickle_setattrs_from_dict(self, slots) < 0)
-	return NULL;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static char pickle___reduce__doc[] =
-"Reduce an object to contituent parts for serialization\n"
-;
-
-static PyObject *
-pickle___reduce__(PyObject *self)
-{
-    PyObject *args=NULL, *bargs=NULL, *state=NULL, *getnewargs=NULL;
-    int l, i;
-
-    getnewargs = PyObject_GetAttr(self, py___getnewargs__);
-    if (getnewargs) {
-	bargs = PyObject_CallFunctionObjArgs(getnewargs, NULL);
-	Py_DECREF(getnewargs);
-	if (!bargs)
-	    return NULL;
-	l = PyTuple_Size(bargs);
-	if (l < 0)
-	    goto end;
-    }
-    else {
-	PyErr_Clear();
-	l = 0;
-    }
-
-    args = PyTuple_New(l+1);
-    if (args == NULL)
-	goto end;
-
-    Py_INCREF(self->ob_type);
-    PyTuple_SET_ITEM(args, 0, (PyObject*)(self->ob_type));
-    for (i = 0; i < l; i++) {
-	Py_INCREF(PyTuple_GET_ITEM(bargs, i));
-	PyTuple_SET_ITEM(args, i+1, PyTuple_GET_ITEM(bargs, i));
-    }
-
-    state = PyObject_CallMethodObjArgs(self, py___getstate__, NULL);
-    if (!state)
-	goto end;
-
-    state = Py_BuildValue("(OON)", __newobj__, args, state);
-
- end:
-    Py_XDECREF(bargs);
-    Py_XDECREF(args);
-
-    return state;
-}
-
-
-/* Return the object's state, a dict or None.
-
-   If the object has no dict, its state is None.
-   Otherwise, return a dict containing all the attributes that
-   don't start with "_v_".
-
-   The caller should not modify this dict, as it may be a reference to
-   the object's __dict__.
-*/
-
-static PyObject *
-Per__getstate__(cPersistentObject *self)
-{
-    /* TODO:  Should it be an error to call __getstate__() on a ghost? */
-    if (unghostify(self) < 0)
-        return NULL;
-
-    /* TODO:  should we increment stickiness?  Tim doesn't understand that
-       question. */
-    return pickle___getstate__((PyObject*)self);
-}
-
-/* The Persistent base type provides a traverse function, but not a
-   clear function.  An instance of a Persistent subclass will have
-   its dict cleared through subtype_clear().
-
-   There is always a cycle between a persistent object and its cache.
-   When the cycle becomes unreachable, the clear function for the
-   cache will break the cycle.  Thus, the persistent object need not
-   have a clear function.  It would be complex to write a clear function
-   for the objects, if we needed one, because of the reference count
-   tricks done by the cache.
-*/
-
-static void
-Per_dealloc(cPersistentObject *self)
-{
-    if (self->state >= 0)
-	unlink_from_ring(self);
-    if (self->cache)
-	cPersistenceCAPI->percachedel(self->cache, self->oid);
-    Py_XDECREF(self->cache);
-    Py_XDECREF(self->jar);
-    Py_XDECREF(self->oid);
-    self->ob_type->tp_free(self);
-}
-
-static int
-Per_traverse(cPersistentObject *self, visitproc visit, void *arg)
-{
-    int err;
-
-#define VISIT(SLOT) \
-    if (SLOT) { \
-	err = visit((PyObject *)(SLOT), arg); \
-	if (err) \
-		     return err; \
-    }
-
-    VISIT(self->jar);
-    VISIT(self->oid);
-    VISIT(self->cache);
-
-#undef VISIT
-    return 0;
-}
-
-/* convert_name() returns a new reference to a string name
-   or sets an exception and returns NULL.
-*/
-
-static PyObject *
-convert_name(PyObject *name)
-{
-#ifdef Py_USING_UNICODE
-    /* The Unicode to string conversion is done here because the
-       existing tp_setattro slots expect a string object as name
-       and we wouldn't want to break those. */
-    if (PyUnicode_Check(name)) {
-	name = PyUnicode_AsEncodedString(name, NULL, NULL);
-    }
-    else
-#endif
-    if (!PyString_Check(name)) {
-	PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
-	return NULL;
-    } else
-	Py_INCREF(name);
-    return name;
-}
-
-/* Returns true if the object requires unghostification.
-
-   There are several special attributes that we allow access to without
-   requiring that the object be unghostified:
-   __class__
-   __del__
-   __dict__
-   __of__
-   __setstate__
-*/
-
-static int
-unghost_getattr(const char *s)
-{
-    if (*s++ != '_')
-	return 1;
-    if (*s == 'p') {
-	s++;
-	if (*s == '_')
-	    return 0; /* _p_ */
-	else
-	    return 1;
-    }
-    else if (*s == '_') {
-	s++;
-	switch (*s) {
-	case 'c':
-	    return strcmp(s, "class__");
-	case 'd':
-	    s++;
-	    if (!strcmp(s, "el__"))
-		return 0; /* __del__ */
-	    if (!strcmp(s, "ict__"))
-		return 0; /* __dict__ */
-	    return 1;
-	case 'o':
-	    return strcmp(s, "of__");
-	case 's':
-	    return strcmp(s, "setstate__");
-	default:
-	    return 1;
-	}
-    }
-    return 1;
-}
-
-static PyObject*
-Per_getattro(cPersistentObject *self, PyObject *name)
-{
-    PyObject *result = NULL;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (unghost_getattr(s)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-    }
-    result = PyObject_GenericGetAttr((PyObject *)self, name);
-
-  Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-/* Exposed as _p_getattr method.  Test whether base getattr should be used */
-static PyObject *
-Per__p_getattr(cPersistentObject *self, PyObject *name)
-{
-    PyObject *result = NULL;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (*s != '_' || unghost_getattr(s)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-        result = Py_False;
-    }
-    else
-	result = Py_True;
-
-    Py_INCREF(result);
-
-  Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-/*
-   TODO:  we should probably not allow assignment of __class__ and __dict__.
-*/
-
-static int
-Per_setattro(cPersistentObject *self, PyObject *name, PyObject *v)
-{
-    int result = -1;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (strncmp(s, "_p_", 3) != 0) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-	if (strncmp(s, "_v_", 3) != 0
-	    && self->state != cPersistent_CHANGED_STATE) {
-	    if (changed(self) < 0)
-		goto Done;
-	}
-    }
-    result = PyObject_GenericSetAttr((PyObject *)self, name, v);
-
- Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-
-static int
-Per_p_set_or_delattro(cPersistentObject *self, PyObject *name, PyObject *v)
-{
-    int result = -1;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (strncmp(s, "_p_", 3)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-
-        result = 0;
-    }
-    else {
-        if (PyObject_GenericSetAttr((PyObject *)self, name, v) < 0)
-	    goto Done;
-        result = 1;
-    }
-
- Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-static PyObject *
-Per__p_setattr(cPersistentObject *self, PyObject *args)
-{
-    PyObject *name, *v, *result;
-    int r;
-
-    if (!PyArg_ParseTuple(args, "OO:_p_setattr", &name, &v))
-	return NULL;
-
-    r = Per_p_set_or_delattro(self, name, v);
-    if (r < 0)
-	return NULL;
-
-    result = r ? Py_True : Py_False;
-    Py_INCREF(result);
-    return result;
-}
-
-static PyObject *
-Per__p_delattr(cPersistentObject *self, PyObject *name)
-{
-    int r;
-    PyObject *result;
-
-    r = Per_p_set_or_delattro(self, name, NULL);
-    if (r < 0)
-	return NULL;
-
-    result = r ? Py_True : Py_False;
-    Py_INCREF(result);
-    return result;
-}
-
-
-static PyObject *
-Per_get_changed(cPersistentObject *self)
-{
-    if (self->state < 0) {
-	Py_INCREF(Py_None);
-	return Py_None;
-    }
-    return PyBool_FromLong(self->state == cPersistent_CHANGED_STATE);
-}
-
-static int
-Per_set_changed(cPersistentObject *self, PyObject *v)
-{
-    int deactivate = 0;
-    int true;
-
-    if (!v) {
-	/* delattr is used to invalidate an object even if it has changed. */
-	if (self->state != cPersistent_GHOST_STATE)
-	    self->state = cPersistent_UPTODATE_STATE;
-	deactivate = 1;
-    }
-    else if (v == Py_None)
-	deactivate = 1;
-
-    if (deactivate) {
-	PyObject *res, *meth;
-	meth = PyObject_GetAttr((PyObject *)self, py__p_deactivate);
-	if (meth == NULL)
-	    return -1;
-	res = PyObject_CallObject(meth, NULL);
-	if (res)
-	    Py_DECREF(res);
-	else {
-	    /* an error occurred in _p_deactivate().
-
-	    It's not clear what we should do here.  The code is
-	    obviously ignoring the exception, but it shouldn't return
-	    0 for a getattr and set an exception.  The simplest change
-	    is to clear the exception, but that simply masks the
-	    error.
-
-	    This prints an error to stderr just like exceptions in
-	    __del__().  It would probably be better to log it but that
-	    would be painful from C.
-	    */
-	    PyErr_WriteUnraisable(meth);
-	}
-	Py_DECREF(meth);
-	return 0;
-    }
-    /* !deactivate.  If passed a true argument, mark self as changed (starting
-     * with ZODB 3.6, that includes activating the object if it's a ghost).
-     * If passed a false argument, and the object isn't a ghost, set the
-     * state as up-to-date.
-     */
-    true = PyObject_IsTrue(v);
-    if (true == -1)
-	return -1;
-    if (true) {
-    	if (self->state < 0) {
-    	    if (unghostify(self) < 0)
-    	        return -1;
-    	    }
-	return changed(self);
-    }
-
-    /* We were passed a false, non-None argument.  If we're not a ghost,
-     * mark self as up-to-date.
-     */
-    if (self->state >= 0)
-	self->state = cPersistent_UPTODATE_STATE;
-    return 0;
-}
-
-static PyObject *
-Per_get_oid(cPersistentObject *self)
-{
-    PyObject *oid = self->oid ? self->oid : Py_None;
-    Py_INCREF(oid);
-    return oid;
-}
-
-static int
-Per_set_oid(cPersistentObject *self, PyObject *v)
-{
-    if (self->cache) {
-	int result;
-
-	if (v == NULL) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can't delete _p_oid of cached object");
-	    return -1;
-	}
-	if (PyObject_Cmp(self->oid, v, &result) < 0)
-	    return -1;
-	if (result) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can not change _p_oid of cached object");
-	    return -1;
-	}
-    }
-    Py_XDECREF(self->oid);
-    Py_XINCREF(v);
-    self->oid = v;
-    return 0;
-}
-
-static PyObject *
-Per_get_jar(cPersistentObject *self)
-{
-    PyObject *jar = self->jar ? self->jar : Py_None;
-    Py_INCREF(jar);
-    return jar;
-}
-
-static int
-Per_set_jar(cPersistentObject *self, PyObject *v)
-{
-    if (self->cache) {
-	int result;
-
-	if (v == NULL) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can't delete _p_jar of cached object");
-	    return -1;
-	}
-	if (PyObject_Cmp(self->jar, v, &result) < 0)
-	    return -1;
-	if (result) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can not change _p_jar of cached object");
-	    return -1;
-	}
-    }
-    Py_XDECREF(self->jar);
-    Py_XINCREF(v);
-    self->jar = v;
-    return 0;
-}
-
-static PyObject *
-Per_get_serial(cPersistentObject *self)
-{
-    return PyString_FromStringAndSize(self->serial, 8);
-}
-
-static int
-Per_set_serial(cPersistentObject *self, PyObject *v)
-{
-    if (v) {
-	if (PyString_Check(v) && PyString_GET_SIZE(v) == 8)
-	    memcpy(self->serial, PyString_AS_STRING(v), 8);
-	else {
-	    PyErr_SetString(PyExc_ValueError,
-			    "_p_serial must be an 8-character string");
-	    return -1;
-	}
-    } else
-	memset(self->serial, 0, 8);
-    return 0;
-}
-
-static PyObject *
-Per_get_mtime(cPersistentObject *self)
-{
-    PyObject *t, *v;
-
-    if (unghostify(self) < 0)
-	return NULL;
-
-    accessed(self);
-
-    if (memcmp(self->serial, "\0\0\0\0\0\0\0\0", 8) == 0) {
-	Py_INCREF(Py_None);
-	return Py_None;
-    }
-
-    t = PyObject_CallFunction(TimeStamp, "s#", self->serial, 8);
-    if (!t)
-	return NULL;
-    v = PyObject_CallMethod(t, "timeTime", "");
-    Py_DECREF(t);
-    return v;
-}
-
-static PyObject *
-Per_get_state(cPersistentObject *self)
-{
-    return PyInt_FromLong(self->state);
-}
-
-static PyGetSetDef Per_getsets[] = {
-    {"_p_changed", (getter)Per_get_changed, (setter)Per_set_changed},
-    {"_p_jar", (getter)Per_get_jar, (setter)Per_set_jar},
-    {"_p_mtime", (getter)Per_get_mtime},
-    {"_p_oid", (getter)Per_get_oid, (setter)Per_set_oid},
-    {"_p_serial", (getter)Per_get_serial, (setter)Per_set_serial},
-    {"_p_state", (getter)Per_get_state},
-    {NULL}
-};
-
-static struct PyMethodDef Per_methods[] = {
-  {"_p_deactivate", (PyCFunction)Per__p_deactivate, METH_NOARGS,
-   "_p_deactivate() -- Deactivate the object"},
-  {"_p_activate", (PyCFunction)Per__p_activate, METH_NOARGS,
-   "_p_activate() -- Activate the object"},
-  {"_p_invalidate", (PyCFunction)Per__p_invalidate, METH_NOARGS,
-   "_p_invalidate() -- Invalidate the object"},
-  {"_p_getattr", (PyCFunction)Per__p_getattr, METH_O,
-   "_p_getattr(name) -- Test whether the base class must handle the name\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-   "\n"
-   "This method should be called by subclass __getattribute__\n"
-   "implementations before doing anything else. If the method\n"
-   "returns True, then __getattribute__ implementations must delegate\n"
-   "to the base class, Persistent.\n"
-  },
-  {"_p_setattr", (PyCFunction)Per__p_setattr, METH_VARARGS,
-   "_p_setattr(name, value) -- Save persistent meta data\n"
-   "\n"
-   "This method should be called by subclass __setattr__ implementations\n"
-   "before doing anything else.  If it returns true, then the attribute\n"
-   "was handled by the base class.\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-  },
-  {"_p_delattr", (PyCFunction)Per__p_delattr, METH_O,
-   "_p_delattr(name) -- Delete persistent meta data\n"
-   "\n"
-   "This method should be called by subclass __delattr__ implementations\n"
-   "before doing anything else.  If it returns true, then the attribute\n"
-   "was handled by the base class.\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-  },
-  {"__getstate__", (PyCFunction)Per__getstate__, METH_NOARGS,
-   pickle___getstate__doc },
-  {"__setstate__", (PyCFunction)pickle___setstate__, METH_O,
-   pickle___setstate__doc},
-  {"__reduce__", (PyCFunction)pickle___reduce__, METH_NOARGS,
-   pickle___reduce__doc},
-
-  {NULL,		NULL}		/* sentinel */
-};
-
-/* This module is compiled as a shared library.  Some compilers don't
-   allow addresses of Python objects defined in other libraries to be
-   used in static initializers here.  The DEFERRED_ADDRESS macro is
-   used to tag the slots where such addresses appear; the module init
-   function must fill in the tagged slots at runtime.  The argument is
-   for documentation -- the macro ignores it.
-*/
-#define DEFERRED_ADDRESS(ADDR) 0
-
-static PyTypeObject Pertype = {
-    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyPersist_MetaType))
-    0,					/* ob_size */
-    "persistent.Persistent",		/* tp_name */
-    sizeof(cPersistentObject),		/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)Per_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    0,					/* tp_as_number */
-    0,					/* tp_as_sequence */
-    0,					/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    (getattrofunc)Per_getattro,		/* tp_getattro */
-    (setattrofunc)Per_setattro,		/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
-    					/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)Per_traverse,		/* tp_traverse */
-    0,					/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    0,					/* tp_iter */
-    0,					/* tp_iternext */
-    Per_methods,			/* tp_methods */
-    0,					/* tp_members */
-    Per_getsets,			/* tp_getset */
-};
-
-/* End of code for Persistent objects */
-/* -------------------------------------------------------- */
-
-typedef int (*intfunctionwithpythonarg)(PyObject*);
-
-/* Load the object's state if necessary and become sticky */
-static int
-Per_setstate(cPersistentObject *self)
-{
-    if (unghostify(self) < 0)
-        return -1;
-    self->state = cPersistent_STICKY_STATE;
-    return 0;
-}
-
-static PyObject *
-simple_new(PyObject *self, PyObject *type_object)
-{
-    return PyType_GenericNew((PyTypeObject *)type_object, NULL, NULL);
-}
-
-static PyMethodDef cPersistence_methods[] = {
-    {"simple_new", simple_new, METH_O,
-     "Create an object by simply calling a class's __new__ method without "
-     "arguments."},
-    {NULL, NULL}
-};
-
-
-static cPersistenceCAPIstruct
-truecPersistenceCAPI = {
-    &Pertype,
-    (getattrofunc)Per_getattro,	/*tp_getattr with object key*/
-    (setattrofunc)Per_setattro,	/*tp_setattr with object key*/
-    changed,
-    accessed,
-    ghostify,
-    (intfunctionwithpythonarg)Per_setstate,
-    NULL /* The percachedel slot is initialized in cPickleCache.c when
-	    the module is loaded.  It uses a function in a different
-	    shared library. */
-};
-
-void
-initcPersistence(void)
-{
-    PyObject *m, *s;
-    PyObject *copy_reg;
-
-    if (init_strings() < 0)
-      return;
-
-    m = Py_InitModule3("cPersistence", cPersistence_methods,
-		       cPersistence_doc_string);
-
-    Pertype.ob_type = &PyType_Type;
-    Pertype.tp_new = PyType_GenericNew;
-    if (PyType_Ready(&Pertype) < 0)
-	return;
-    if (PyModule_AddObject(m, "Persistent", (PyObject *)&Pertype) < 0)
-	return;
-
-    cPersistenceCAPI = &truecPersistenceCAPI;
-    s = PyCObject_FromVoidPtr(cPersistenceCAPI, NULL);
-    if (!s)
-	return;
-    if (PyModule_AddObject(m, "CAPI", s) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "GHOST", cPersistent_GHOST_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "UPTODATE", cPersistent_UPTODATE_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "CHANGED", cPersistent_CHANGED_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "STICKY", cPersistent_STICKY_STATE) < 0)
-	return;
-
-    py_simple_new = PyObject_GetAttrString(m, "simple_new");
-    if (!py_simple_new)
-        return;
-
-    copy_reg = PyImport_ImportModule("copy_reg");
-    if (!copy_reg)
-	return;
-
-    copy_reg_slotnames = PyObject_GetAttrString(copy_reg, "_slotnames");
-    if (!copy_reg_slotnames) {
-	Py_DECREF(copy_reg);
-	return;
-    }
-
-    __newobj__ = PyObject_GetAttrString(copy_reg, "__newobj__");
-    if (!__newobj__) {
-	Py_DECREF(copy_reg);
-	return;
-    }
-
-    if (!TimeStamp) {
-        m = PyImport_ImportModule("persistent.TimeStamp");
-        if (!m)
-	    return;
-        TimeStamp = PyObject_GetAttrString(m, "TimeStamp");
-        Py_DECREF(m);
-        /* fall through to immediate return on error */
-    }
-}

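The unghostify()/ghostify() pair above implements the state machine that
Python code observes through _p_state, _p_activate(), and _p_deactivate(). A
small sketch of the visible transitions, assuming the same mappingstorage
setup used in the doctests (-1 is GHOST, 0 is UPTODATE):

    from ZODB.config import databaseFromString

    db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
    cn = db.open()
    root = cn.root()
    print root._p_state     # 0: loaded (up to date)
    root._p_deactivate()    # ghostify() frees the instance dict
    print root._p_state     # -1: ghost
    root._p_activate()      # unghostify() reloads state via jar.setstate()
    print root._p_state     # 0 again
    db.close()
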
Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/persistent/cPersistence.c)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.c	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,1259 @@
+/*****************************************************************************
+
+  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+  All Rights Reserved.
+
+  This software is subject to the provisions of the Zope Public License,
+  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+  FOR A PARTICULAR PURPOSE
+
+ ****************************************************************************/
+static char cPersistence_doc_string[] =
+"Defines Persistent mixin class for persistent objects.\n"
+"\n"
+"$Id$\n";
+
+#include "cPersistence.h"
+#include "structmember.h"
+
+struct ccobject_head_struct {
+    CACHE_HEAD
+};
+
+/* These two objects are initialized when the module is loaded */
+static PyObject *TimeStamp, *py_simple_new;
+
+/* Strings initialized by init_strings() below. */
+static PyObject *py_keys, *py_setstate, *py___dict__, *py_timeTime;
+static PyObject *py__p_changed, *py__p_deactivate;
+static PyObject *py___getattr__, *py___setattr__, *py___delattr__;
+static PyObject *py___slotnames__, *copy_reg_slotnames, *__newobj__;
+static PyObject *py___getnewargs__, *py___getstate__;
+
+
+static int
+init_strings(void)
+{
+#define INIT_STRING(S) \
+    if (!(py_ ## S = PyString_InternFromString(#S))) \
+	return -1;
+    INIT_STRING(keys);
+    INIT_STRING(setstate);
+    INIT_STRING(timeTime);
+    INIT_STRING(__dict__);
+    INIT_STRING(_p_changed);
+    INIT_STRING(_p_deactivate);
+    INIT_STRING(__getattr__);
+    INIT_STRING(__setattr__);
+    INIT_STRING(__delattr__);
+    INIT_STRING(__slotnames__);
+    INIT_STRING(__getnewargs__);
+    INIT_STRING(__getstate__);
+#undef INIT_STRING
+    return 0;
+}
+
+#ifdef Py_DEBUG
+static void
+fatal_1350(cPersistentObject *self, const char *caller, const char *detail)
+{
+	char buf[1000];
+
+	PyOS_snprintf(buf, sizeof(buf),
+	    "cPersistence.c %s(): object at %p with type %.200s\n"
+	    "%s.\n"
+	    "The only known cause is multiple threads trying to ghost and\n"
+	    "unghost the object simultaneously.\n"
+	    "That's not legal, but ZODB can't stop it.\n"
+	    "See Collector #1350.\n",
+	    caller, self, self->ob_type->tp_name, detail);
+	Py_FatalError(buf);
+}
+#endif
+
+static void ghostify(cPersistentObject*);
+
+/* Load the state of the object, unghostifying it.  Upon success, return 1.
+ * If an error occurred, re-ghostify the object and return -1.
+ */
+static int
+unghostify(cPersistentObject *self)
+{
+    if (self->state < 0 && self->jar) {
+        PyObject *r;
+
+        /* Is it ever possible to not have a cache? */
+        if (self->cache) {
+            /* Create a node in the ring for this unghostified object. */
+            self->cache->non_ghost_count++;
+	    self->cache->total_estimated_size += self->estimated_size;
+	    ring_add(&self->cache->ring_home, &self->ring);
+	    Py_INCREF(self);
+        }
+	/* set state to CHANGED while setstate() call is in progress
+	   to prevent a recursive call to _PyPersist_Load().
+	*/
+        self->state = cPersistent_CHANGED_STATE;
+        /* Call the object's __setstate__() */
+	r = PyObject_CallMethod(self->jar, "setstate", "O", (PyObject *)self);
+        if (r == NULL) {
+            ghostify(self);
+            return -1;
+        }
+        self->state = cPersistent_UPTODATE_STATE;
+        Py_DECREF(r);
+        if (self->cache && self->ring.r_next == NULL) {
+#ifdef Py_DEBUG
+		fatal_1350(self, "unghostify",
+			   "is not in the cache even though we just "
+			   "unghostified it");
+#else
+		PyErr_Format(PyExc_SystemError, "object at %p with type "
+			     "%.200s not in the cache even though we just "
+			     "unghostified it", self, self->ob_type->tp_name);
+		return -1;
+#endif
+	}
+    }
+    return 1;
+}
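+
+/* Callers in this file follow the pattern (cf. Per_getattro below):
+
+       if (unghostify(self) < 0)
+           return NULL;
+       accessed(self);
+
+   i.e. load the state on demand, then record the access so the object
+   moves toward the MRU end of the cache ring. */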
+
+/****************************************************************************/
+
+static PyTypeObject Pertype;
+
+static void
+accessed(cPersistentObject *self)
+{
+    /* Do nothing unless the object is in a cache and not a ghost. */
+    if (self->cache && self->state >= 0 && self->ring.r_next)
+	ring_move_to_head(&self->cache->ring_home, &self->ring);
+}
+
+static void
+unlink_from_ring(cPersistentObject *self)
+{
+    /* If the cache has been cleared, then a non-ghost object
+       isn't in the ring any longer.
+    */
+    if (self->ring.r_next == NULL)
+	return;
+
+    /* if we're ghostifying an object, we better have some non-ghosts */
+    assert(self->cache->non_ghost_count > 0);
+    self->cache->non_ghost_count--;
+    self->cache->total_estimated_size -= self->estimated_size;
+    ring_del(&self->ring);
+}
+
+static void
+ghostify(cPersistentObject *self)
+{
+    PyObject **dictptr;
+
+    /* are we already a ghost? */
+    if (self->state == cPersistent_GHOST_STATE)
+        return;
+
+    /* Is it ever possible to not have a cache? */
+    if (self->cache == NULL) {
+        self->state = cPersistent_GHOST_STATE;
+        return;
+    }
+
+    if (self->ring.r_next == NULL) {
+	/* There's no way to raise an error in this routine. */
+#ifdef Py_DEBUG
+	fatal_1350(self, "ghostify", "claims to be in a cache but isn't");
+#else
+	return;
+#endif
+    }
+
+    /* If we're ghostifying an object, we better have some non-ghosts. */
+    assert(self->cache->non_ghost_count > 0);
+    self->cache->non_ghost_count--;
+    self->cache->total_estimated_size -= self->estimated_size;
+    ring_del(&self->ring);
+    self->state = cPersistent_GHOST_STATE;
+    dictptr = _PyObject_GetDictPtr((PyObject *)self);
+    if (dictptr && *dictptr) {
+	Py_DECREF(*dictptr);
+	*dictptr = NULL;
+    }
+
+    /* We remove the reference to the just ghosted object that the ring
+     * holds.  Note that the dictionary of oids->objects has an uncounted
+     * reference, so if the ring's reference was the only one, this frees
+     * the ghost object.  Note further that the object's dealloc knows to
+     * inform the dictionary that it is going away.
+     */
+    Py_DECREF(self);
+}
+
+static int
+changed(cPersistentObject *self)
+{
+  if ((self->state == cPersistent_UPTODATE_STATE ||
+       self->state == cPersistent_STICKY_STATE)
+       && self->jar)
+    {
+	PyObject *meth, *arg, *result;
+	static PyObject *s_register;
+
+	if (s_register == NULL)
+	    s_register = PyString_InternFromString("register");
+	meth = PyObject_GetAttr((PyObject *)self->jar, s_register);
+	if (meth == NULL)
+	    return -1;
+	arg = PyTuple_New(1);
+	if (arg == NULL) {
+	    Py_DECREF(meth);
+	    return -1;
+	}
+	Py_INCREF(self);
+	PyTuple_SET_ITEM(arg, 0, (PyObject *)self);
+	result = PyEval_CallObject(meth, arg);
+	Py_DECREF(arg);
+	Py_DECREF(meth);
+	if (result == NULL)
+	    return -1;
+	Py_DECREF(result);
+
+	self->state = cPersistent_CHANGED_STATE;
+    }
+
+  return 0;
+}
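+
+/* C consumers normally reach changed() through the PER_CHANGED macro in
+   cPersistence.h, bracketing a mutation roughly as (illustrative):
+
+       if (PER_CHANGED(self) < 0)
+           return -1;
+
+   The jar's register() is only called while the object is up-to-date (or
+   sticky); once the state is CHANGED, later calls in the same transaction
+   do nothing. */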
+
+static PyObject *
+Per__p_deactivate(cPersistentObject *self)
+{
+    if (self->state == cPersistent_UPTODATE_STATE && self->jar) {
+	PyObject **dictptr = _PyObject_GetDictPtr((PyObject *)self);
+	if (dictptr && *dictptr) {
+	    Py_DECREF(*dictptr);
+	    *dictptr = NULL;
+	}
+	/* Note that we need to set to ghost state unless we are
+	   called directly. Methods that override this need to
+	   do the same! */
+	ghostify(self);
+    }
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+static PyObject *
+Per__p_activate(cPersistentObject *self)
+{
+    if (unghostify(self) < 0)
+        return NULL;
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+static int Per_set_changed(cPersistentObject *self, PyObject *v);
+
+static PyObject *
+Per__p_invalidate(cPersistentObject *self)
+{
+    signed char old_state = self->state;
+
+    if (old_state != cPersistent_GHOST_STATE) {
+        if (Per_set_changed(self, NULL) < 0)
+            return NULL;
+        ghostify(self);
+    }
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+
+static PyObject *
+pickle_slotnames(PyTypeObject *cls)
+{
+    PyObject *slotnames;
+
+    slotnames = PyDict_GetItem(cls->tp_dict, py___slotnames__);
+    if (slotnames) {
+	Py_INCREF(slotnames);
+	return slotnames;
+    }
+
+    slotnames = PyObject_CallFunctionObjArgs(copy_reg_slotnames,
+					     (PyObject*)cls, NULL);
+    if (slotnames && !(slotnames == Py_None || PyList_Check(slotnames))) {
+	PyErr_SetString(PyExc_TypeError,
+			"copy_reg._slotnames didn't return a list or None");
+	Py_DECREF(slotnames);
+	return NULL;
+    }
+
+    return slotnames;
+}
+
+static PyObject *
+pickle_copy_dict(PyObject *state)
+{
+    PyObject *copy, *key, *value;
+    char *ckey;
+    Py_ssize_t pos = 0;
+
+    copy = PyDict_New();
+    if (!copy)
+	return NULL;
+
+    if (!state)
+	return copy;
+
+    while (PyDict_Next(state, &pos, &key, &value)) {
+	if (key && PyString_Check(key)) {
+	    ckey = PyString_AS_STRING(key);
+	    if (*ckey == '_' &&
+		(ckey[1] == 'v' || ckey[1] == 'p') &&
+		ckey[2] == '_')
+		/* skip volatile and persistent */
+		continue;
+        }
+
+	if (PyObject_SetItem(copy, key, value) < 0)
+	    goto err;
+    }
+
+    return copy;
+ err:
+    Py_DECREF(copy);
+    return NULL;
+}
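+
+/* Example (illustrative): given the state {'title': 'x', '_v_cache': 1,
+   '_p_foo': 2, '_volatile': 3}, the copy keeps 'title' and '_volatile';
+   only names that start with exactly '_v_' or '_p_' are skipped. */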
+
+
+static char pickle___getstate__doc[] =
+"Get the object serialization state\n"
+"\n"
+"If the object has no assigned slots and has no instance dictionary, then \n"
+"None is returned.\n"
+"\n"
+"If the object has no assigned slots and has an instance dictionary, then \n"
+"the a copy of the instance dictionary is returned. The copy has any items \n"
+"with names starting with '_v_' or '_p_' ommitted.\n"
+"\n"
+"If the object has assigned slots, then a two-element tuple is returned.  \n"
+"The first element is either None or a copy of the instance dictionary, \n"
+"as described above. The second element is a dictionary with items \n"
+"for each of the assigned slots.\n"
+;
+
+static PyObject *
+pickle___getstate__(PyObject *self)
+{
+    PyObject *slotnames=NULL, *slots=NULL, *state=NULL;
+    PyObject **dictp;
+    int n=0;
+
+    slotnames = pickle_slotnames(self->ob_type);
+    if (!slotnames)
+	return NULL;
+
+    dictp = _PyObject_GetDictPtr(self);
+    if (dictp)
+	state = pickle_copy_dict(*dictp);
+    else {
+	state = Py_None;
+	Py_INCREF(state);
+    }
+
+    if (slotnames != Py_None) {
+	int i;
+
+	slots = PyDict_New();
+	if (!slots)
+	    goto end;
+
+	for (i = 0; i < PyList_GET_SIZE(slotnames); i++) {
+	    PyObject *name, *value;
+	    char *cname;
+
+	    name = PyList_GET_ITEM(slotnames, i);
+	    if (PyString_Check(name)) {
+		cname = PyString_AS_STRING(name);
+		if (*cname == '_' &&
+		    (cname[1] == 'v' || cname[1] == 'p') &&
+		    cname[2] == '_')
+		    /* skip volatile and persistent */
+		    continue;
+            }
+
+	    /* Unclear:  Will this go through our getattr hook? */
+	    value = PyObject_GetAttr(self, name);
+	    if (value == NULL)
+		PyErr_Clear();
+	    else {
+		int err = PyDict_SetItem(slots, name, value);
+		Py_DECREF(value);
+		if (err < 0)
+		    goto end;
+		n++;
+            }
+        }
+    }
+
+    if (n)
+	state = Py_BuildValue("(NO)", state, slots);
+
+ end:
+    Py_XDECREF(slotnames);
+    Py_XDECREF(slots);
+
+    return state;
+}
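+
+/* Examples (illustrative): an instance whose __dict__ is {'a': 1,
+   '_v_tmp': 2} and which has no populated slots returns {'a': 1}; if a
+   slot 'b' is also set, the result is the pair ({'a': 1}, {'b': ...}). */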
+
+static int
+pickle_setattrs_from_dict(PyObject *self, PyObject *dict)
+{
+    PyObject *key, *value;
+    Py_ssize_t pos = 0;
+
+    if (!PyDict_Check(dict)) {
+	PyErr_SetString(PyExc_TypeError, "Expected dictionary");
+	return -1;
+    }
+
+    while (PyDict_Next(dict, &pos, &key, &value)) {
+	if (PyObject_SetAttr(self, key, value) < 0)
+	    return -1;
+    }
+    return 0;
+}
+
+static char pickle___setstate__doc[] =
+"Set the object serialization state\n\n"
+"The state should be in one of 3 forms:\n\n"
+"- None\n\n"
+"  Ignored\n\n"
+"- A dictionary\n\n"
+"  In this case, the object's instance dictionary will be cleared and \n"
+"  updated with the new state.\n\n"
+"- A two-tuple with a string as the first element. \n\n"
+"  In this case, the method named by the string in the first element will be\n"
+"  called with the second element.\n\n"
+"  This form supports migration of data formats.\n\n"
+"- A two-tuple with None or a Dictionary as the first element and\n"
+"  with a dictionary as the second element.\n\n"
+"  If the first element is not None, then the object's instance dictionary \n"
+"  will be cleared and updated with the value.\n\n"
+"  The items in the second element will be assigned as attributes.\n"
+;
+
+static PyObject *
+pickle___setstate__(PyObject *self, PyObject *state)
+{
+    PyObject *slots=NULL;
+
+    if (PyTuple_Check(state)) {
+	if (!PyArg_ParseTuple(state, "OO:__setstate__", &state, &slots))
+	    return NULL;
+    }
+
+    if (state != Py_None) {
+	PyObject **dict;
+
+	dict = _PyObject_GetDictPtr(self);
+	if (dict) {
+	    if (!*dict) {
+		*dict = PyDict_New();
+		if (!*dict)
+		    return NULL;
+            }
+        }
+
+	if (dict && *dict) {	/* slots-only types have no dict pointer */
+	    PyDict_Clear(*dict);
+	    if (PyDict_Update(*dict, state) < 0)
+		return NULL;
+        }
+	else if (pickle_setattrs_from_dict(self, state) < 0)
+	    return NULL;
+    }
+
+    if (slots && pickle_setattrs_from_dict(self, slots) < 0)
+	return NULL;
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
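+
+/* Examples (illustrative): __setstate__({'a': 1}) clears the instance
+   dict and refills it from the mapping, while __setstate__(({'a': 1},
+   {'b': 2})) additionally assigns the slot entry, as if by
+   setattr(obj, 'b', 2). */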
+
+static char pickle___reduce__doc[] =
+"Reduce an object to contituent parts for serialization\n"
+;
+
+static PyObject *
+pickle___reduce__(PyObject *self)
+{
+    PyObject *args=NULL, *bargs=NULL, *state=NULL, *getnewargs=NULL;
+    int l, i;
+
+    getnewargs = PyObject_GetAttr(self, py___getnewargs__);
+    if (getnewargs) {
+	bargs = PyObject_CallFunctionObjArgs(getnewargs, NULL);
+	Py_DECREF(getnewargs);
+	if (!bargs)
+	    return NULL;
+	l = PyTuple_Size(bargs);
+	if (l < 0)
+	    goto end;
+    }
+    else {
+	PyErr_Clear();
+	l = 0;
+    }
+
+    args = PyTuple_New(l+1);
+    if (args == NULL)
+	goto end;
+
+    Py_INCREF(self->ob_type);
+    PyTuple_SET_ITEM(args, 0, (PyObject*)(self->ob_type));
+    for (i = 0; i < l; i++) {
+	Py_INCREF(PyTuple_GET_ITEM(bargs, i));
+	PyTuple_SET_ITEM(args, i+1, PyTuple_GET_ITEM(bargs, i));
+    }
+
+    state = PyObject_CallMethodObjArgs(self, py___getstate__, NULL);
+    if (!state)
+	goto end;
+
+    state = Py_BuildValue("(OON)", __newobj__, args, state);
+
+ end:
+    Py_XDECREF(bargs);
+    Py_XDECREF(args);
+
+    return state;
+}
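+
+/* The result is the standard __newobj__-style reduce triple
+   (illustrative):
+
+       (copy_reg.__newobj__,
+        (type(obj),) + obj.__getnewargs__(),
+        obj.__getstate__())
+
+   with the __getnewargs__ part empty when the object has no such
+   method. */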
+
+
+/* Return the object's state, a dict or None.
+
+   If the object has no dict, its state is None.
+   Otherwise, return a dict containing all the attributes that
+   don't start with "_v_".
+
+   The caller should not modify this dict, as it may be a reference to
+   the object's __dict__.
+*/
+
+static PyObject *
+Per__getstate__(cPersistentObject *self)
+{
+    /* TODO:  Should it be an error to call __getstate__() on a ghost? */
+    if (unghostify(self) < 0)
+        return NULL;
+
+    /* TODO:  should we increment stickiness?  Tim doesn't understand that
+       question. */
+    return pickle___getstate__((PyObject*)self);
+}
+
+/* The Persistent base type provides a traverse function, but not a
+   clear function.  An instance of a Persistent subclass will have
+   its dict cleared through subtype_clear().
+
+   There is always a cycle between a persistent object and its cache.
+   When the cycle becomes unreachable, the clear function for the
+   cache will break the cycle.  Thus, the persistent object need not
+   have a clear function.  It would be complex to write a clear function
+   for the objects, if we needed one, because of the reference count
+   tricks done by the cache.
+*/
+
+static void
+Per_dealloc(cPersistentObject *self)
+{
+    if (self->state >= 0)
+	unlink_from_ring(self);
+    if (self->cache)
+	cPersistenceCAPI->percachedel(self->cache, self->oid);
+    Py_XDECREF(self->cache);
+    Py_XDECREF(self->jar);
+    Py_XDECREF(self->oid);
+    self->ob_type->tp_free(self);
+}
+
+static int
+Per_traverse(cPersistentObject *self, visitproc visit, void *arg)
+{
+    int err;
+
+#define VISIT(SLOT) \
+    if (SLOT) { \
+	err = visit((PyObject *)(SLOT), arg); \
+	if (err) \
+		     return err; \
+    }
+
+    VISIT(self->jar);
+    VISIT(self->oid);
+    VISIT(self->cache);
+
+#undef VISIT
+    return 0;
+}
+
+/* convert_name() returns a new reference to a string name
+   or sets an exception and returns NULL.
+*/
+
+static PyObject *
+convert_name(PyObject *name)
+{
+#ifdef Py_USING_UNICODE
+    /* The Unicode to string conversion is done here because the
+       existing tp_setattro slots expect a string object as name
+       and we wouldn't want to break those. */
+    if (PyUnicode_Check(name)) {
+	name = PyUnicode_AsEncodedString(name, NULL, NULL);
+    }
+    else
+#endif
+    if (!PyString_Check(name)) {
+	PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
+	return NULL;
+    } else
+	Py_INCREF(name);
+    return name;
+}
+
+/* Returns true if the object requires unghostification.
+
+   There are several special attributes that we allow access to without
+   requiring that the object be unghostified:
+   __class__
+   __del__
+   __dict__
+   __of__
+   __setstate__
+*/
+
+static int
+unghost_getattr(const char *s)
+{
+    if (*s++ != '_')
+	return 1;
+    if (*s == 'p') {
+	s++;
+	if (*s == '_')
+	    return 0; /* _p_ */
+	else
+	    return 1;
+    }
+    else if (*s == '_') {
+	s++;
+	switch (*s) {
+	case 'c':
+	    return strcmp(s, "class__");
+	case 'd':
+	    s++;
+	    if (!strcmp(s, "el__"))
+		return 0; /* __del__ */
+	    if (!strcmp(s, "ict__"))
+		return 0; /* __dict__ */
+	    return 1;
+	case 'o':
+	    return strcmp(s, "of__");
+	case 's':
+	    return strcmp(s, "setstate__");
+	default:
+	    return 1;
+	}
+    }
+    return 1;
+}
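+
+/* So, for example, reading obj._p_oid or obj.__class__ never loads a
+   ghost's state, while reading obj.title or obj.__doc__ does. */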
+
+static PyObject*
+Per_getattro(cPersistentObject *self, PyObject *name)
+{
+    PyObject *result = NULL;	/* guilty until proved innocent */
+    char *s;
+
+    name = convert_name(name);
+    if (!name)
+	goto Done;
+    s = PyString_AS_STRING(name);
+
+    if (unghost_getattr(s)) {
+	if (unghostify(self) < 0)
+	    goto Done;
+	accessed(self);
+    }
+    result = PyObject_GenericGetAttr((PyObject *)self, name);
+
+  Done:
+    Py_XDECREF(name);
+    return result;
+}
+
+/* Exposed as _p_getattr method.  Test whether base getattr should be used */
+static PyObject *
+Per__p_getattr(cPersistentObject *self, PyObject *name)
+{
+    PyObject *result = NULL;	/* guilty until proved innocent */
+    char *s;
+
+    name = convert_name(name);
+    if (!name)
+	goto Done;
+    s = PyString_AS_STRING(name);
+
+    if (*s != '_' || unghost_getattr(s)) {
+	if (unghostify(self) < 0)
+	    goto Done;
+	accessed(self);
+        result = Py_False;
+    }
+    else
+	result = Py_True;
+
+    Py_INCREF(result);
+
+  Done:
+    Py_XDECREF(name);
+    return result;
+}
+
+/*
+   TODO:  we should probably not allow assignment of __class__ and __dict__.
+*/
+
+static int
+Per_setattro(cPersistentObject *self, PyObject *name, PyObject *v)
+{
+    int result = -1;	/* guilty until proved innocent */
+    char *s;
+
+    name = convert_name(name);
+    if (!name)
+	goto Done;
+    s = PyString_AS_STRING(name);
+
+    if (strncmp(s, "_p_", 3) != 0) {
+	if (unghostify(self) < 0)
+	    goto Done;
+	accessed(self);
+	if (strncmp(s, "_v_", 3) != 0
+	    && self->state != cPersistent_CHANGED_STATE) {
+	    if (changed(self) < 0)
+		goto Done;
+	}
+    }
+    result = PyObject_GenericSetAttr((PyObject *)self, name, v);
+
+ Done:
+    Py_XDECREF(name);
+    return result;
+}
+
+
+static int
+Per_p_set_or_delattro(cPersistentObject *self, PyObject *name, PyObject *v)
+{
+    int result = -1;	/* guilty until proved innocent */
+    char *s;
+
+    name = convert_name(name);
+    if (!name)
+	goto Done;
+    s = PyString_AS_STRING(name);
+
+    if (strncmp(s, "_p_", 3)) {
+	if (unghostify(self) < 0)
+	    goto Done;
+	accessed(self);
+
+        result = 0;
+    }
+    else {
+        if (PyObject_GenericSetAttr((PyObject *)self, name, v) < 0)
+	    goto Done;
+        result = 1;
+    }
+
+ Done:
+    Py_XDECREF(name);
+    return result;
+}
+
+static PyObject *
+Per__p_setattr(cPersistentObject *self, PyObject *args)
+{
+    PyObject *name, *v, *result;
+    int r;
+
+    if (!PyArg_ParseTuple(args, "OO:_p_setattr", &name, &v))
+	return NULL;
+
+    r = Per_p_set_or_delattro(self, name, v);
+    if (r < 0)
+	return NULL;
+
+    result = r ? Py_True : Py_False;
+    Py_INCREF(result);
+    return result;
+}
+
+static PyObject *
+Per__p_delattr(cPersistentObject *self, PyObject *name)
+{
+    int r;
+    PyObject *result;
+
+    r = Per_p_set_or_delattro(self, name, NULL);
+    if (r < 0)
+	return NULL;
+
+    result = r ? Py_True : Py_False;
+    Py_INCREF(result);
+    return result;
+}
+
+
+static PyObject *
+Per_get_changed(cPersistentObject *self)
+{
+    if (self->state < 0) {
+	Py_INCREF(Py_None);
+	return Py_None;
+    }
+    return PyBool_FromLong(self->state == cPersistent_CHANGED_STATE);
+}
+
+static int
+Per_set_changed(cPersistentObject *self, PyObject *v)
+{
+    int deactivate = 0;
+    int true;
+
+    if (!v) {
+	/* delattr is used to invalidate an object even if it has changed. */
+	if (self->state != cPersistent_GHOST_STATE)
+	    self->state = cPersistent_UPTODATE_STATE;
+	deactivate = 1;
+    }
+    else if (v == Py_None)
+	deactivate = 1;
+
+    if (deactivate) {
+	PyObject *res, *meth;
+	meth = PyObject_GetAttr((PyObject *)self, py__p_deactivate);
+	if (meth == NULL)
+	    return -1;
+	res = PyObject_CallObject(meth, NULL);
+	if (res)
+	    Py_DECREF(res);
+	else {
+	    /* an error occurred in _p_deactivate().
+
+	    It's not clear what we should do here.  The code is
+	    obviously ignoring the exception, but it shouldn't return
+	    0 for a getattr and set an exception.  The simplest change
+	    is to clear the exception, but that simply masks the
+	    error.
+
+	    This prints an error to stderr just like exceptions in
+	    __del__().  It would probably be better to log it but that
+	    would be painful from C.
+	    */
+	    PyErr_WriteUnraisable(meth);
+	}
+	Py_DECREF(meth);
+	return 0;
+    }
+    /* !deactivate.  If passed a true argument, mark self as changed (starting
+     * with ZODB 3.6, that includes activating the object if it's a ghost).
+     * If passed a false argument, and the object isn't a ghost, set the
+     * state as up-to-date.
+     */
+    true = PyObject_IsTrue(v);
+    if (true == -1)
+	return -1;
+    if (true) {
+	if (self->state < 0) {
+	    if (unghostify(self) < 0)
+		return -1;
+	}
+	return changed(self);
+    }
+
+    /* We were passed a false, non-None argument.  If we're not a ghost,
+     * mark self as up-to-date.
+     */
+    if (self->state >= 0)
+	self->state = cPersistent_UPTODATE_STATE;
+    return 0;
+}
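+
+/* Summary of the _p_changed protocol implemented above (illustrative):
+   obj._p_changed = True marks the object changed, loading it first if it
+   is a ghost; = False resets a non-ghost to up-to-date; = None
+   deactivates a clean object; del obj._p_changed invalidates the object
+   even if it has unsaved changes.  Reading _p_changed yields None for a
+   ghost (see Per_get_changed above). */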
+
+static PyObject *
+Per_get_oid(cPersistentObject *self)
+{
+    PyObject *oid = self->oid ? self->oid : Py_None;
+    Py_INCREF(oid);
+    return oid;
+}
+
+static int
+Per_set_oid(cPersistentObject *self, PyObject *v)
+{
+    if (self->cache) {
+	int result;
+
+	if (v == NULL) {
+	    PyErr_SetString(PyExc_ValueError,
+			    "can't delete _p_oid of cached object");
+	    return -1;
+	}
+	if (PyObject_Cmp(self->oid, v, &result) < 0)
+	    return -1;
+	if (result) {
+	    PyErr_SetString(PyExc_ValueError,
+			    "cannot change _p_oid of cached object");
+	    return -1;
+	}
+    }
+    Py_XDECREF(self->oid);
+    Py_XINCREF(v);
+    self->oid = v;
+    return 0;
+}
+
+static PyObject *
+Per_get_jar(cPersistentObject *self)
+{
+    PyObject *jar = self->jar ? self->jar : Py_None;
+    Py_INCREF(jar);
+    return jar;
+}
+
+static int
+Per_set_jar(cPersistentObject *self, PyObject *v)
+{
+    if (self->cache) {
+	int result;
+
+	if (v == NULL) {
+	    PyErr_SetString(PyExc_ValueError,
+			    "can't delete _p_jar of cached object");
+	    return -1;
+	}
+	if (PyObject_Cmp(self->jar, v, &result) < 0)
+	    return -1;
+	if (result) {
+	    PyErr_SetString(PyExc_ValueError,
+			    "cannot change _p_jar of cached object");
+	    return -1;
+	}
+    }
+    Py_XDECREF(self->jar);
+    Py_XINCREF(v);
+    self->jar = v;
+    return 0;
+}
+
+static PyObject *
+Per_get_serial(cPersistentObject *self)
+{
+    return PyString_FromStringAndSize(self->serial, 8);
+}
+
+static int
+Per_set_serial(cPersistentObject *self, PyObject *v)
+{
+    if (v) {
+	if (PyString_Check(v) && PyString_GET_SIZE(v) == 8)
+	    memcpy(self->serial, PyString_AS_STRING(v), 8);
+	else {
+	    PyErr_SetString(PyExc_ValueError,
+			    "_p_serial must be an 8-character string");
+	    return -1;
+	}
+    } else
+	memset(self->serial, 0, 8);
+    return 0;
+}
+
+static PyObject *
+Per_get_mtime(cPersistentObject *self)
+{
+    PyObject *t, *v;
+
+    if (unghostify(self) < 0)
+	return NULL;
+
+    accessed(self);
+
+    if (memcmp(self->serial, "\0\0\0\0\0\0\0\0", 8) == 0) {
+	Py_INCREF(Py_None);
+	return Py_None;
+    }
+
+    t = PyObject_CallFunction(TimeStamp, "s#", self->serial, 8);
+    if (!t)
+	return NULL;
+    v = PyObject_CallMethod(t, "timeTime", "");
+    Py_DECREF(t);
+    return v;
+}
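+
+/* _p_mtime is thus the commit time of the current revision, i.e.
+   TimeStamp(self->serial).timeTime() expressed in seconds since the
+   epoch, or None while the serial is still all zeros. */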
+
+static PyObject *
+Per_get_state(cPersistentObject *self)
+{
+    return PyInt_FromLong(self->state);
+}
+
+static PyObject *
+Per_get_estimated_size(cPersistentObject *self)
+{
+  return PyInt_FromLong(self->estimated_size);
+}
+
+static int
+Per_set_estimated_size(cPersistentObject *self, PyObject *v)
+{
+    if (v) {
+        if (PyInt_Check(v)) {
+	    if (PyInt_AS_LONG(v) < 0) {
+	        PyErr_SetString(PyExc_ValueError,
+			        "_p_estimated_size must not be negative");
+	        return -1;
+	    }
+	    self->estimated_size = PyInt_AS_LONG(v);
+	}
+	else {
+	    PyErr_SetString(PyExc_ValueError,
+			    "_p_estimated_size must be an integer");
+	    return -1;
+	}
+    } else
+        self->estimated_size = 0;
+    return 0;
+}
+
+static PyGetSetDef Per_getsets[] = {
+    {"_p_changed", (getter)Per_get_changed, (setter)Per_set_changed},
+    {"_p_jar", (getter)Per_get_jar, (setter)Per_set_jar},
+    {"_p_mtime", (getter)Per_get_mtime},
+    {"_p_oid", (getter)Per_get_oid, (setter)Per_set_oid},
+    {"_p_serial", (getter)Per_get_serial, (setter)Per_set_serial},
+    {"_p_state", (getter)Per_get_state},
+    {"_p_estimated_size",
+     (getter)Per_get_estimated_size, (setter)Per_set_estimated_size
+    },
+    {NULL}
+};
+
+static struct PyMethodDef Per_methods[] = {
+  {"_p_deactivate", (PyCFunction)Per__p_deactivate, METH_NOARGS,
+   "_p_deactivate() -- Deactivate the object"},
+  {"_p_activate", (PyCFunction)Per__p_activate, METH_NOARGS,
+   "_p_activate() -- Activate the object"},
+  {"_p_invalidate", (PyCFunction)Per__p_invalidate, METH_NOARGS,
+   "_p_invalidate() -- Invalidate the object"},
+  {"_p_getattr", (PyCFunction)Per__p_getattr, METH_O,
+   "_p_getattr(name) -- Test whether the base class must handle the name\n"
+   "\n"
+   "The method unghostifies the object, if necessary.\n"
+   "The method records the object access, if necessary.\n"
+   "\n"
+   "This method should be called by subclass __getattribute__\n"
+   "implementations before doing anything else. If the method\n"
+   "returns True, then __getattribute__ implementations must delegate\n"
+   "to the base class, Persistent.\n"
+  },
+  {"_p_setattr", (PyCFunction)Per__p_setattr, METH_VARARGS,
+   "_p_setattr(name, value) -- Save persistent meta data\n"
+   "\n"
+   "This method should be called by subclass __setattr__ implementations\n"
+   "before doing anything else.  If it returns true, then the attribute\n"
+   "was handled by the base class.\n"
+   "\n"
+   "The method unghostifies the object, if necessary.\n"
+   "The method records the object access, if necessary.\n"
+  },
+  {"_p_delattr", (PyCFunction)Per__p_delattr, METH_O,
+   "_p_delattr(name) -- Delete persistent meta data\n"
+   "\n"
+   "This method should be called by subclass __delattr__ implementations\n"
+   "before doing anything else.  If it returns true, then the attribute\n"
+   "was handled by the base class.\n"
+   "\n"
+   "The method unghostifies the object, if necessary.\n"
+   "The method records the object access, if necessary.\n"
+  },
+  {"__getstate__", (PyCFunction)Per__getstate__, METH_NOARGS,
+   pickle___getstate__doc },
+  {"__setstate__", (PyCFunction)pickle___setstate__, METH_O,
+   pickle___setstate__doc},
+  {"__reduce__", (PyCFunction)pickle___reduce__, METH_NOARGS,
+   pickle___reduce__doc},
+
+  {NULL,		NULL}		/* sentinel */
+};
+
+/* This module is compiled as a shared library.  Some compilers don't
+   allow addresses of Python objects defined in other libraries to be
+   used in static initializers here.  The DEFERRED_ADDRESS macro is
+   used to tag the slots where such addresses appear; the module init
+   function must fill in the tagged slots at runtime.  The argument is
+   for documentation -- the macro ignores it.
+*/
+#define DEFERRED_ADDRESS(ADDR) 0
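+
+/* e.g. initcPersistence() below fills the deferred slot at runtime with
+   Pertype.ob_type = &PyType_Type; before calling PyType_Ready(). */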
+
+static PyTypeObject Pertype = {
+    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyPersist_MetaType))
+    0,					/* ob_size */
+    "persistent.Persistent",		/* tp_name */
+    sizeof(cPersistentObject),		/* tp_basicsize */
+    0,					/* tp_itemsize */
+    (destructor)Per_dealloc,		/* tp_dealloc */
+    0,					/* tp_print */
+    0,					/* tp_getattr */
+    0,					/* tp_setattr */
+    0,					/* tp_compare */
+    0,					/* tp_repr */
+    0,					/* tp_as_number */
+    0,					/* tp_as_sequence */
+    0,					/* tp_as_mapping */
+    0,					/* tp_hash */
+    0,					/* tp_call */
+    0,					/* tp_str */
+    (getattrofunc)Per_getattro,		/* tp_getattro */
+    (setattrofunc)Per_setattro,		/* tp_setattro */
+    0,					/* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    					/* tp_flags */
+    0,					/* tp_doc */
+    (traverseproc)Per_traverse,		/* tp_traverse */
+    0,					/* tp_clear */
+    0,					/* tp_richcompare */
+    0,					/* tp_weaklistoffset */
+    0,					/* tp_iter */
+    0,					/* tp_iternext */
+    Per_methods,			/* tp_methods */
+    0,					/* tp_members */
+    Per_getsets,			/* tp_getset */
+};
+
+/* End of code for Persistent objects */
+/* -------------------------------------------------------- */
+
+typedef int (*intfunctionwithpythonarg)(PyObject*);
+
+/* Load the object's state if necessary and become sticky */
+static int
+Per_setstate(cPersistentObject *self)
+{
+    if (unghostify(self) < 0)
+        return -1;
+    self->state = cPersistent_STICKY_STATE;
+    return 0;
+}
+
+static PyObject *
+simple_new(PyObject *self, PyObject *type_object)
+{
+    return PyType_GenericNew((PyTypeObject *)type_object, NULL, NULL);
+}
+
+static PyMethodDef cPersistence_methods[] = {
+    {"simple_new", simple_new, METH_O,
+     "Create an object by simply calling a class's __new__ method without "
+     "arguments."},
+    {NULL, NULL}
+};
+
+
+static cPersistenceCAPIstruct
+truecPersistenceCAPI = {
+    &Pertype,
+    (getattrofunc)Per_getattro,	/*tp_getattr with object key*/
+    (setattrofunc)Per_setattro,	/*tp_setattr with object key*/
+    changed,
+    accessed,
+    ghostify,
+    (intfunctionwithpythonarg)Per_setstate,
+    NULL /* The percachedel slot is initialized in cPickleCache.c when
+	    the module is loaded.  It uses a function in a different
+	    shared library. */
+};
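+
+/* Other extension modules pick this struct up at import time; e.g.
+   cPickleCache.c does, in effect:
+
+       cPersistenceCAPI = (cPersistenceCAPIstruct *)
+           PyCObject_Import("persistent.cPersistence", "CAPI");
+*/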
+
+void
+initcPersistence(void)
+{
+    PyObject *m, *s;
+    PyObject *copy_reg;
+
+    if (init_strings() < 0)
+      return;
+
+    m = Py_InitModule3("cPersistence", cPersistence_methods,
+		       cPersistence_doc_string);
+
+    Pertype.ob_type = &PyType_Type;
+    Pertype.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&Pertype) < 0)
+	return;
+    if (PyModule_AddObject(m, "Persistent", (PyObject *)&Pertype) < 0)
+	return;
+
+    cPersistenceCAPI = &truecPersistenceCAPI;
+    s = PyCObject_FromVoidPtr(cPersistenceCAPI, NULL);
+    if (!s)
+	return;
+    if (PyModule_AddObject(m, "CAPI", s) < 0)
+	return;
+
+    if (PyModule_AddIntConstant(m, "GHOST", cPersistent_GHOST_STATE) < 0)
+	return;
+
+    if (PyModule_AddIntConstant(m, "UPTODATE", cPersistent_UPTODATE_STATE) < 0)
+	return;
+
+    if (PyModule_AddIntConstant(m, "CHANGED", cPersistent_CHANGED_STATE) < 0)
+	return;
+
+    if (PyModule_AddIntConstant(m, "STICKY", cPersistent_STICKY_STATE) < 0)
+	return;
+
+    py_simple_new = PyObject_GetAttrString(m, "simple_new");
+    if (!py_simple_new)
+        return;
+
+    copy_reg = PyImport_ImportModule("copy_reg");
+    if (!copy_reg)
+	return;
+
+    copy_reg_slotnames = PyObject_GetAttrString(copy_reg, "_slotnames");
+    if (!copy_reg_slotnames) {
+	Py_DECREF(copy_reg);
+	return;
+    }
+
+    __newobj__ = PyObject_GetAttrString(copy_reg, "__newobj__");
+    if (!__newobj__) {
+	Py_DECREF(copy_reg);
+	return;
+    }
+
+    if (!TimeStamp) {
+        m = PyImport_ImportModule("persistent.TimeStamp");
+        if (!m)
+	    return;
+        TimeStamp = PyObject_GetAttrString(m, "TimeStamp");
+        Py_DECREF(m);
+        /* fall through to immediate return on error */
+    }
+}

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/persistent/cPersistence.h	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,130 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#ifndef CPERSISTENCE_H
-#define CPERSISTENCE_H
-
-#include "Python.h"
-#include "py24compat.h"
-
-#include "ring.h"
-
-#define CACHE_HEAD \
-    PyObject_HEAD \
-    CPersistentRing ring_home; \
-    int non_ghost_count;
-
-struct ccobject_head_struct;
-
-typedef struct ccobject_head_struct PerCache;
-
-/* How big is a persistent object?
-
-   12  PyGC_Head is two pointers and an int
-    8  PyObject_HEAD is an int and a pointer
- 
-   12  jar, oid, cache pointers
-    8  ring struct
-    8  serialno
-    4  state + extra
-
-  (52) so far
-
-    4  dict ptr
-    4  weaklist ptr
-  -------------------------
-   64  only need 60, but obmalloc rounds up to a multiple of eight
-
-  Even a ghost requires 64 bytes.  It's possible to make a persistent
-  instance with slots and no dict, which changes the storage needed.
-
-*/
-
-#define cPersistent_HEAD \
-    PyObject_HEAD \
-    PyObject *jar; \
-    PyObject *oid; \
-    PerCache *cache; \
-    CPersistentRing ring; \
-    char serial[8]; \
-    signed char state; \
-    unsigned char reserved[3];
-
-#define cPersistent_GHOST_STATE -1
-#define cPersistent_UPTODATE_STATE 0
-#define cPersistent_CHANGED_STATE 1
-#define cPersistent_STICKY_STATE 2
-
-typedef struct {
-    cPersistent_HEAD
-} cPersistentObject;
-
-typedef void (*percachedelfunc)(PerCache *, PyObject *);
-
-typedef struct {
-    PyTypeObject *pertype;
-    getattrofunc getattro;
-    setattrofunc setattro;
-    int (*changed)(cPersistentObject*);
-    void (*accessed)(cPersistentObject*);
-    void (*ghostify)(cPersistentObject*);
-    int (*setstate)(PyObject*);
-    percachedelfunc percachedel;
-} cPersistenceCAPIstruct;
-
-#define cPersistenceType cPersistenceCAPI->pertype
-
-#ifndef DONT_USE_CPERSISTENCECAPI
-static cPersistenceCAPIstruct *cPersistenceCAPI;
-#endif
-
-#define cPersistanceModuleName "cPersistence"
-
-#define PER_TypeCheck(O) PyObject_TypeCheck((O), cPersistenceCAPI->pertype)
-
-#define PER_USE_OR_RETURN(O,R) {if((O)->state==cPersistent_GHOST_STATE && cPersistenceCAPI->setstate((PyObject*)(O)) < 0) return (R); else if ((O)->state==cPersistent_UPTODATE_STATE) (O)->state=cPersistent_STICKY_STATE;}
-
-#define PER_CHANGED(O) (cPersistenceCAPI->changed((cPersistentObject*)(O)))
-
-#define PER_GHOSTIFY(O) (cPersistenceCAPI->ghostify((cPersistentObject*)(O)))
-
-/* If the object is sticky, make it non-sticky, so that it can be ghostified.
-   The value is not meaningful
- */
-#define PER_ALLOW_DEACTIVATION(O) ((O)->state==cPersistent_STICKY_STATE && ((O)->state=cPersistent_UPTODATE_STATE))
-
-#define PER_PREVENT_DEACTIVATION(O)  ((O)->state==cPersistent_UPTODATE_STATE && ((O)->state=cPersistent_STICKY_STATE))
-
-/* 
-   Make a persistent object usable from C by:
-
-   - Making sure it is not a ghost
-
-   - Making it sticky.
-
-   IMPORTANT: If you call this and don't call PER_ALLOW_DEACTIVATION, 
-              your object will not be ghostified.
-
-   PER_USE returns 1 on success and 0 on failure, where failure means an
-   error occurred.
- */
-#define PER_USE(O) \
-(((O)->state != cPersistent_GHOST_STATE \
-  || (cPersistenceCAPI->setstate((PyObject*)(O)) >= 0)) \
- ? (((O)->state==cPersistent_UPTODATE_STATE) \
-    ? ((O)->state=cPersistent_STICKY_STATE) : 1) : 0)
-
-#define PER_ACCESSED(O)  (cPersistenceCAPI->accessed((cPersistentObject*)(O)))
-
-#endif

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/persistent/cPersistence.h)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPersistence.h	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,133 @@
+/*****************************************************************************
+
+  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+  All Rights Reserved.
+
+  This software is subject to the provisions of the Zope Public License,
+  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+  FOR A PARTICULAR PURPOSE
+
+ ****************************************************************************/
+
+#ifndef CPERSISTENCE_H
+#define CPERSISTENCE_H
+
+#include "Python.h"
+#include "py24compat.h"
+
+#include "ring.h"
+
+#define CACHE_HEAD \
+    PyObject_HEAD \
+    CPersistentRing ring_home; \
+    int non_ghost_count; \
+    PY_LONG_LONG total_estimated_size;   /* total estimated size of items in cache */
+
+struct ccobject_head_struct;
+
+typedef struct ccobject_head_struct PerCache;    
+
+/* How big is a persistent object?
+
+   12  PyGC_Head is two pointers and an int
+    8  PyObject_HEAD is an int and a pointer
+ 
+   12  jar, oid, cache pointers
+    8  ring struct
+    8  serialno
+    4  state + extra
+    4  size info
+
+  (56) so far
+
+    4  dict ptr
+    4  weaklist ptr
+  -------------------------
+   64  (= 56 + 4 + 4); obmalloc rounds allocations up to a multiple of eight
+
+  Even a ghost requires 64 bytes.  It's possible to make a persistent
+  instance with slots and no dict, which changes the storage needed.
+
+*/
+
+#define cPersistent_HEAD \
+    PyObject_HEAD \
+    PyObject *jar; \
+    PyObject *oid; \
+    PerCache *cache; \
+    CPersistentRing ring; \
+    char serial[8]; \
+    signed char state; \
+    unsigned char reserved[3]; \
+    unsigned long estimated_size;
+
+#define cPersistent_GHOST_STATE -1
+#define cPersistent_UPTODATE_STATE 0
+#define cPersistent_CHANGED_STATE 1
+#define cPersistent_STICKY_STATE 2
+
+typedef struct {
+    cPersistent_HEAD
+} cPersistentObject;
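+
+/* A C subtype embeds the standard header first (illustrative):
+
+       typedef struct {
+           cPersistent_HEAD
+           int len;
+       } MyPersistent;
+
+   so the macros below can treat a MyPersistent* as a
+   cPersistentObject*. */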
+
+typedef void (*percachedelfunc)(PerCache *, PyObject *);
+
+typedef struct {
+    PyTypeObject *pertype;
+    getattrofunc getattro;
+    setattrofunc setattro;
+    int (*changed)(cPersistentObject*);
+    void (*accessed)(cPersistentObject*);
+    void (*ghostify)(cPersistentObject*);
+    int (*setstate)(PyObject*);
+    percachedelfunc percachedel;
+} cPersistenceCAPIstruct;
+
+#define cPersistenceType cPersistenceCAPI->pertype
+
+#ifndef DONT_USE_CPERSISTENCECAPI
+static cPersistenceCAPIstruct *cPersistenceCAPI;
+#endif
+
+#define cPersistanceModuleName "cPersistence"
+
+#define PER_TypeCheck(O) PyObject_TypeCheck((O), cPersistenceCAPI->pertype)
+
+#define PER_USE_OR_RETURN(O,R) {if((O)->state==cPersistent_GHOST_STATE && cPersistenceCAPI->setstate((PyObject*)(O)) < 0) return (R); else if ((O)->state==cPersistent_UPTODATE_STATE) (O)->state=cPersistent_STICKY_STATE;}
+
+#define PER_CHANGED(O) (cPersistenceCAPI->changed((cPersistentObject*)(O)))
+
+#define PER_GHOSTIFY(O) (cPersistenceCAPI->ghostify((cPersistentObject*)(O)))
+
+/* If the object is sticky, make it non-sticky, so that it can be ghostified.
+   The value is not meaningful
+ */
+#define PER_ALLOW_DEACTIVATION(O) ((O)->state==cPersistent_STICKY_STATE && ((O)->state=cPersistent_UPTODATE_STATE))
+
+#define PER_PREVENT_DEACTIVATION(O)  ((O)->state==cPersistent_UPTODATE_STATE && ((O)->state=cPersistent_STICKY_STATE))
+
+/* 
+   Make a persistent object usable from C by:
+
+   - Making sure it is not a ghost
+
+   - Making it sticky.
+
+   IMPORTANT: If you call this and don't call PER_ALLOW_DEACTIVATION, 
+              your object will not be ghostified.
+
+   PER_USE returns 1 on success and 0 on failure, where failure means an
+   error occurred.
+ */
+#define PER_USE(O) \
+(((O)->state != cPersistent_GHOST_STATE \
+  || (cPersistenceCAPI->setstate((PyObject*)(O)) >= 0)) \
+ ? (((O)->state==cPersistent_UPTODATE_STATE) \
+    ? ((O)->state=cPersistent_STICKY_STATE) : 1) : 0)
+
+#define PER_ACCESSED(O)  (cPersistenceCAPI->accessed((cPersistentObject*)(O)))
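+
+/* Usage sketch (hypothetical caller): a C method built on this API
+   typically brackets access to its loaded state as
+
+       PER_USE_OR_RETURN(self, NULL);
+       ... read or write the object's state ...
+       PER_ALLOW_DEACTIVATION(self);
+       PER_ACCESSED(self);
+
+   much as the BTrees implementation does. */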
+
+#endif

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/persistent/cPickleCache.c	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,1116 +0,0 @@
- /*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-/*
-
-Objects are stored under three different regimes:
-
-Regime 1: Persistent Classes
-
-Persistent Classes are part of ZClasses. They are stored in the
-self->data dictionary, and are never garbage collected.
-
-The klass_items() method returns a sequence of (oid,object) tuples for
-every Persistent Class, which should make it possible to implement
-garbage collection in Python if necessary.
-
-Regime 2: Ghost Objects
-
-There is no benefit to keeping a ghost object which has no external
-references, therefore a weak reference scheme is used to ensure that
-ghost objects are removed from memory as soon as possible, when the
-last external reference is lost.
-
-Ghost objects are stored in the self->data dictionary. Normally a
-dictionary keeps a strong reference on its values, however this
-reference count is 'stolen'.
-
-This weak reference scheme leaves a dangling reference, in the
-dictionary, when the last external reference is lost. To clean up this
-dangling reference the persistent object dealloc function calls
-self->cache->_oid_unreferenced(self->oid). The cache looks up the oid
-in the dictionary, ensures it points to an object whose reference
-count is zero, then removes it from the dictionary. Before removing
-the object from the dictionary it must temporarily resurrect the
-object in much the same way that class instances are resurrected
-before their __del__ is called.
-
-Since ghost objects are stored under a different regime to non-ghost
-objects, an extra ghostify function in cPersistenceAPI replaces
-self->state=GHOST_STATE assignments that were common in other
-persistent classes (such as BTrees).
-
-Regime 3: Non-Ghost Objects
-
-Non-ghost objects are stored in two data structures: the dictionary
-mapping oids to objects and a doubly-linked list that encodes the
-order in which the objects were accessed.  The dictionary reference is
-borrowed, as it is for ghosts.  The list reference is a new reference;
-the list stores recently used objects, even if they are otherwise
-unreferenced, to avoid loading the object from the database again.
-
-The doubly-linked-list nodes contain next and previous pointers linking
-together the cache and all non-ghost persistent objects.
-
-The node embedded in the cache is the home position. On every
-attribute access a non-ghost object will relink itself just behind the
-home position in the ring. Objects accessed least recently will
-eventually find themselves positioned after the home position.
-
-Occasionally other nodes are temporarily inserted in the ring as
-position markers. The cache contains a ring_lock flag which must be
-set and unset before and after doing so. Only if the flag is unset can
-the cache assume that all nodes are either his own home node, or nodes
-from persistent objects. This assumption is useful during the garbage
-collection process.
-
-The number of non-ghost objects is counted in self->non_ghost_count.
-The garbage collection process consists of traversing the ring, and
-deactivating (that is, turning into a ghost) every object until
-self->non_ghost_count is down to the target size, or until it
-reaches the home position again.
-
-Note that objects in the sticky or changed states are still kept in
-the ring, however they can not be deactivated. The garbage collection
-process must skip such objects, rather than deactivating them.
-
-*/
-
-static char cPickleCache_doc_string[] =
-"Defines the PickleCache used by ZODB Connection objects.\n"
-"\n"
-"$Id$\n";
-
-#define DONT_USE_CPERSISTENCECAPI
-#include "cPersistence.h"
-#include "structmember.h"
-#include <time.h>
-#include <stddef.h>
-#undef Py_FindMethod
-
-/* Python string objects to speed lookups; set by module init. */
-static PyObject *py__p_changed;
-static PyObject *py__p_deactivate;
-static PyObject *py__p_jar;
-static PyObject *py__p_oid;
-
-static cPersistenceCAPIstruct *capi;
-
-/* This object is the pickle cache.  The CACHE_HEAD macro guarantees
-   that the layout of this struct is the same as the start of
-   ccobject_head in cPersistence.c */
-typedef struct {
-    CACHE_HEAD
-    int klass_count;                     /* count of persistent classes */
-    PyObject *data;                      /* oid -> object dict */
-    PyObject *jar;                       /* Connection object */
-    int cache_size;                      /* target number of items in cache */
-
-    /* Most of the time the ring contains only:
-       * many nodes corresponding to persistent objects
-       * one 'home' node from the cache.
-    In some cases it is handy to temporarily add other types
-    of node into the ring as placeholders. 'ring_lock' is a boolean
-    indicating that someone has already done this. Currently this
-    is only used by the garbage collection code. */
-
-    int ring_lock;
-
-    /* 'cache_drain_resistance' controls how quickly the cache size will drop
-    when it is smaller than the configured size. A value of zero means it will
-    not drop below the configured size (suitable for most caches). Otherwise,
-    it will remove cache_non_ghost_count/cache_drain_resistance items from
-    the cache every time (suitable for rarely used caches, such as those
-    associated with Zope versions). */
-
-    int cache_drain_resistance;
-
-} ccobject;
-
-static int cc_ass_sub(ccobject *self, PyObject *key, PyObject *v);
-
-/* ---------------------------------------------------------------- */
-
-#define OBJECT_FROM_RING(SELF, HERE) \
-    ((cPersistentObject *)(((char *)here) - offsetof(cPersistentObject, ring)))
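-
-/* This is the container_of idiom: the ring node is embedded in
-   cPersistentObject, so subtracting its offset recovers the owning
-   object.  Note that the expansion uses the caller's variable `here',
-   not the HERE parameter, so callers must pass a variable of that
-   name. */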
-
-/* Insert self into the ring, following after. */
-static void
-insert_after(CPersistentRing *self, CPersistentRing *after)
-{
-    assert(self != NULL);
-    assert(after != NULL);
-    self->r_prev = after;
-    self->r_next = after->r_next;
-    after->r_next->r_prev = self;
-    after->r_next = self;
-}
-
-/* Remove self from the ring. */
-static void
-unlink_from_ring(CPersistentRing *self)
-{
-    assert(self != NULL);
-    self->r_prev->r_next = self->r_next;
-    self->r_next->r_prev = self->r_prev;
-}
-
-static int
-scan_gc_items(ccobject *self, int target)
-{
-    /* This function must only be called with the ring lock held,
-       because it places non-object placeholders in the ring.
-    */
-    cPersistentObject *object;
-    CPersistentRing *here;
-    CPersistentRing before_original_home;
-    int result = -1;   /* guilty until proved innocent */
-
-    /* Scan the ring, from least to most recently used, deactivating
-     * up-to-date objects, until we either find the ring_home again or
-     * we've ghosted enough objects to reach the target size.
-     * Tricky:  __getattr__ and __del__ methods can do anything, and in
-     * particular if we ghostify an object with a __del__ method, that method
-     * can load the object again, putting it back into the MRU part of the
-     * ring.  Waiting to find ring_home again can thus cause an infinite
-     * loop (Collector #1208).  So before_original_home records the MRU
-     * position we start with, and we stop the scan when we reach that.
-     */
-    insert_after(&before_original_home, self->ring_home.r_prev);
-    here = self->ring_home.r_next;   /* least recently used object */
-    while (here != &before_original_home && self->non_ghost_count > target) {
-	assert(self->ring_lock);
-	assert(here != &self->ring_home);
-
-        /* At this point we know that the ring only contains nodes
-	   from persistent objects, plus our own home node.  We know
-	   this because the ring lock is held.  We can safely assume
-	   the current ring node is a persistent object now that we know it
-	   is not the home */
-        object = OBJECT_FROM_RING(self, here);
-
-        if (object->state == cPersistent_UPTODATE_STATE) {
-            CPersistentRing placeholder;
-            PyObject *method;
-            PyObject *temp;
-            int error_occurred = 0;
-            /* deactivate it. This is the main memory saver. */
-
-            /* Add a placeholder, a dummy node in the ring.  We need
-	       to do this to mark our position in the ring.  It is
-	       possible that the PyObject_GetAttr() call below will
-	       invoke a __getattr__() hook in Python.  Also possible
-	       that deactivation will lead to a __del__ method call.
-	       So another thread might run, and mutate the ring as a side
-	       effect of object accesses.  There's no predicting then where
-	       in the ring here->next will point after that.  The
-	       placeholder won't move as a side effect of calling Python
-	       code.
-	    */
-            insert_after(&placeholder, here);
-	    method = PyObject_GetAttr((PyObject *)object, py__p_deactivate);
-	    if (method == NULL)
-	        error_occurred = 1;
-	    else {
- 		temp = PyObject_CallObject(method, NULL);
-                Py_DECREF(method);
-	        if (temp == NULL)
-	            error_occurred = 1;
-	    }
-
-            here = placeholder.r_next;
-            unlink_from_ring(&placeholder);
-            if (error_occurred)
-                goto Done;
-        }
-        else
-            here = here->r_next;
-    }
-    result = 0;
- Done:
-    unlink_from_ring(&before_original_home);
-    return result;
-}
-
-static PyObject *
-lockgc(ccobject *self, int target_size)
-{
-    /* This is thread-safe because of the GIL, and there's nothing
-     * in between checking the ring_lock and acquiring it that calls back
-     * into Python.
-     */
-    if (self->ring_lock) {
-        Py_INCREF(Py_None);
-        return Py_None;
-    }
-
-    self->ring_lock = 1;
-    if (scan_gc_items(self, target_size) < 0) {
-        self->ring_lock = 0;
-        return NULL;
-    }
-    self->ring_lock = 0;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static PyObject *
-cc_incrgc(ccobject *self, PyObject *args)
-{
-    int obsolete_arg = -999;
-    int starting_size = self->non_ghost_count;
-    int target_size = self->cache_size;
-
-    if (self->cache_drain_resistance >= 1) {
-        /* This cache will gradually drain down to a small size. Check
-           a (small) number of objects proportional to the current size */
-
-        int target_size_2 = (starting_size - 1
-			     - starting_size / self->cache_drain_resistance);
-        if (target_size_2 < target_size)
-            target_size = target_size_2;
-    }
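-    /* For example (illustrative): with non_ghost_count = 1000 and
-       cache_drain_resistance = 10, target_size_2 = 1000 - 1 - 100 = 899,
-       so one incrgc() pass may ghostify roughly 10% of the cache. */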
-
-
-    if (!PyArg_ParseTuple(args, "|i:incrgc", &obsolete_arg))
-	return NULL;
-
-    if (obsolete_arg != -999
-        &&
-        (PyErr_Warn(PyExc_DeprecationWarning,
-                    "No argument expected")
-         < 0))
-        return NULL;
-
-    return lockgc(self, target_size);
-}
-
-static PyObject *
-cc_full_sweep(ccobject *self, PyObject *args)
-{
-    int dt = -999;
-
-    /* TODO:  This should be deprecated;  */
-
-    if (!PyArg_ParseTuple(args, "|i:full_sweep", &dt))
-	return NULL;
-    if (dt == -999)
-	return lockgc(self, 0);
-    else
-	return cc_incrgc(self, args);
-}
-
-static PyObject *
-cc_minimize(ccobject *self, PyObject *args)
-{
-    int ignored = -999;
-
-    if (!PyArg_ParseTuple(args, "|i:minimize", &ignored))
-	return NULL;
-
-    if (ignored != -999
-        &&
-        (PyErr_Warn(PyExc_DeprecationWarning,
-                    "No argument expected")
-         < 0))
-        return NULL;
-
-    return lockgc(self, 0);
-}
-
-static int
-_invalidate(ccobject *self, PyObject *key)
-{
-    static PyObject *_p_invalidate = NULL;
-    PyObject *meth, *v;
-
-    v = PyDict_GetItem(self->data, key);
-    if (v == NULL)
-	return 0;
-
-    if (_p_invalidate == NULL)
-      {
-	_p_invalidate = PyString_InternFromString("_p_invalidate");
-	if (_p_invalidate == NULL)
-          {
-	    /* It doesn't make any sense to ignore this error, but
-	       the caller ignores all errors.
-
-               TODO: and why does it do that? This should be fixed
-	    */
-	    return -1;
-          }
-      }
-
-    if (v->ob_refcnt <= 1 && PyType_Check(v)) {
-      /* This looks wrong, but it isn't. We use strong references to types
-         because they don't have the ring members.
-
-         The result is that we *never* remove classes unless
-         they are modified.  We can fix this by using weakrefs uniformly.
-      */
-      self->klass_count--;
-      return PyDict_DelItem(self->data, key);
-    }
-
-    meth = PyObject_GetAttr(v, _p_invalidate);
-    if (meth == NULL)
-      return -1;
-
-    v = PyObject_CallObject(meth, NULL);
-    Py_DECREF(meth);
-    return v == NULL ? -1 : 0;
-}
-
-static PyObject *
-cc_invalidate(ccobject *self, PyObject *inv)
-{
-  PyObject *key, *v;
-  Py_ssize_t i = 0;
-
-  if (PyDict_Check(inv))
-    {
-      while (PyDict_Next(inv, &i, &key, &v))
-        {
-	  if (_invalidate(self, key) < 0)
-            return NULL;
-        }
-      PyDict_Clear(inv);
-    }
-  else {
-      if (PyString_Check(inv))
-        {
-	  if (_invalidate(self, inv) < 0)
-            return NULL;
-        }
-      else {
-	  int l, r;
-
-	  l = PyObject_Length(inv);
-	  if (l < 0)
-	      return NULL;
-	  for (i=l; --i >= 0; ) {
-	      key = PySequence_GetItem(inv, i);
-	      if (!key)
-		  return NULL;
-	      r = _invalidate(self, key);
-	      Py_DECREF(key);
-              if (r < 0)
-                return NULL;
-	  }
-	  /* Dubious:  modifying the input may be an unexpected side effect. */
-	  PySequence_DelSlice(inv, 0, l);
-      }
-  }
-
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static PyObject *
-cc_get(ccobject *self, PyObject *args)
-{
-    PyObject *r, *key, *d = NULL;
-
-    if (!PyArg_ParseTuple(args, "O|O:get", &key, &d))
-	return NULL;
-
-    r = PyDict_GetItem(self->data, key);
-    if (!r) {
-	if (d)
-	    r = d;
-	else
-	    r = Py_None;
-    }
-    Py_INCREF(r);
-    return r;
-}
-
-static PyObject *
-cc_items(ccobject *self)
-{
-    return PyObject_CallMethod(self->data, "items", "");
-}
-
-static PyObject *
-cc_klass_items(ccobject *self)
-{
-    PyObject *l,*k,*v;
-    Py_ssize_t p = 0;
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    while (PyDict_Next(self->data, &p, &k, &v)) {
-        if(PyType_Check(v)) {
-	    v = Py_BuildValue("OO", k, v);
-	    if (v == NULL) {
-		Py_DECREF(l);
-		return NULL;
-	    }
-	    if (PyList_Append(l, v) < 0) {
-		Py_DECREF(v);
-		Py_DECREF(l);
-		return NULL;
-	    }
-	    Py_DECREF(v);
-        }
-    }
-
-    return l;
-}
-
-static PyObject *
-cc_debug_info(ccobject *self)
-{
-    PyObject *l,*k,*v;
-    Py_ssize_t p = 0;
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    while (PyDict_Next(self->data, &p, &k, &v))
-      {
-        if (v->ob_refcnt <= 0)
-          v = Py_BuildValue("Oi", k, v->ob_refcnt);
-
-        else if (! PyType_Check(v) &&
-                 (v->ob_type->tp_basicsize >= sizeof(cPersistentObject))
-                 )
-          v = Py_BuildValue("Oisi",
-                            k, v->ob_refcnt, v->ob_type->tp_name,
-                            ((cPersistentObject*)v)->state);
-        else
-          v = Py_BuildValue("Ois", k, v->ob_refcnt, v->ob_type->tp_name);
-
-        if (v == NULL)
-          goto err;
-
-        if (PyList_Append(l, v) < 0)
-          goto err;
-      }
-
-    return l;
-
- err:
-    Py_DECREF(l);
-    return NULL;
-
-}
-
-static PyObject *
-cc_lru_items(ccobject *self)
-{
-    PyObject *l;
-    CPersistentRing *here;
-
-    if (self->ring_lock) {
-	/* When the ring lock is held, we have no way of knowing which
-	   ring nodes belong to persistent objects, and which are
-	   placeholders. */
-        PyErr_SetString(PyExc_ValueError,
-		".lru_items() is unavailable during garbage collection");
-        return NULL;
-    }
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    here = self->ring_home.r_next;
-    while (here != &self->ring_home) {
-        PyObject *v;
-        cPersistentObject *object = OBJECT_FROM_RING(self, here);
-
-        if (object == NULL) {
-            Py_DECREF(l);
-            return NULL;
-        }
-	v = Py_BuildValue("OO", object->oid, object);
-	if (v == NULL) {
-            Py_DECREF(l);
-            return NULL;
-	}
-	if (PyList_Append(l, v) < 0) {
-	    Py_DECREF(v);
-            Py_DECREF(l);
-            return NULL;
-	}
-        Py_DECREF(v);
-        here = here->r_next;
-    }
-
-    return l;
-}
-
-static void
-cc_oid_unreferenced(ccobject *self, PyObject *oid)
-{
-    /* This is called by the persistent object deallocation function
-       when the reference count on a persistent object reaches
-       zero. We need to fix up our dictionary; its reference is now
-       dangling because we stole its reference count. Be careful to
-       not release the global interpreter lock until this is
-       complete. */
-
-    PyObject *v;
-
-    /* If the cache has been cleared by GC, data will be NULL. */
-    if (!self->data)
-	return;
-
-    v = PyDict_GetItem(self->data, oid);
-    assert(v);
-    assert(v->ob_refcnt == 0);
-    /* Need to be very hairy here because a dictionary is about
-       to decref an already deleted object.
-    */
-
-#ifdef Py_TRACE_REFS
-    /* This is called from the deallocation function after the
-       interpreter has untracked the reference.  Track it again.
-     */
-    _Py_NewReference(v);
-    /* Don't increment total refcount as a result of the
-       shenanigans played in this function.  The _Py_NewReference()
-       call above creates artificial references to v.
-    */
-    _Py_RefTotal--;
-    assert(v->ob_type);
-#else
-    Py_INCREF(v);
-#endif
-    assert(v->ob_refcnt == 1);
-    /* Increment the refcount again, because delitem is going to
-       DECREF it.  If its refcount reached zero again, we'd call back to
-       the dealloc function that called us.
-    */
-    Py_INCREF(v);
-
-    /* TODO:  Should we call _Py_ForgetReference() on error exit? */
-    if (PyDict_DelItem(self->data, oid) < 0)
-	return;
-    Py_DECREF((ccobject *)((cPersistentObject *)v)->cache);
-    ((cPersistentObject *)v)->cache = NULL;
-
-    assert(v->ob_refcnt == 1);
-
-    /* Undo the temporary resurrection.
-       Don't DECREF the object, because this function is called from
-       the object's dealloc function. If the refcnt reaches zero, it
-       will all be invoked recursively.
-     */
-    _Py_ForgetReference(v);
-}
-
-static PyObject *
-cc_ringlen(ccobject *self)
-{
-    CPersistentRing *here;
-    int c = 0;
-
-    for (here = self->ring_home.r_next; here != &self->ring_home;
-	 here = here->r_next)
-	c++;
-    return PyInt_FromLong(c);
-}
-
-static struct PyMethodDef cc_methods[] = {
-    {"items", (PyCFunction)cc_items, METH_NOARGS,
-     "Return list of oid, object pairs for all items in cache."},
-    {"lru_items", (PyCFunction)cc_lru_items, METH_NOARGS,
-     "List (oid, object) pairs from the lru list, as 2-tuples."},
-    {"klass_items", (PyCFunction)cc_klass_items, METH_NOARGS,
-     "List (oid, object) pairs of cached persistent classes."},
-    {"full_sweep", (PyCFunction)cc_full_sweep, METH_VARARGS,
-     "full_sweep() -- Perform a full sweep of the cache."},
-    {"minimize",	(PyCFunction)cc_minimize, METH_VARARGS,
-     "minimize([ignored]) -- Remove as many objects as possible\n\n"
-     "Ghostify all objects that are not modified.  Takes an optional\n"
-     "argument, but ignores it."},
-    {"incrgc", (PyCFunction)cc_incrgc, METH_VARARGS,
-     "incrgc() -- Perform incremental garbage collection\n\n"
-     "This method has been deprecated!\n"
-     "Some other implementations support an optional parameter 'n' which\n"
-     "indicates a repetition count; this value is ignored."},
-    {"invalidate", (PyCFunction)cc_invalidate, METH_O,
-     "invalidate(oids) -- invalidate one, many, or all ids"},
-    {"get", (PyCFunction)cc_get, METH_VARARGS,
-     "get(key [, default]) -- get an item, or a default"},
-    {"ringlen", (PyCFunction)cc_ringlen, METH_NOARGS,
-     "ringlen() -- Returns number of non-ghost items in cache."},
-    {"debug_info", (PyCFunction)cc_debug_info, METH_NOARGS,
-     "debug_info() -- Returns debugging data about objects in the cache."},
-    {NULL, NULL}		/* sentinel */
-};
-
-static int
-cc_init(ccobject *self, PyObject *args, PyObject *kwds)
-{
-    int cache_size = 100;
-    PyObject *jar;
-
-    if (!PyArg_ParseTuple(args, "O|i", &jar, &cache_size))
-	return -1;
-
-    self->jar = NULL;
-    self->data = PyDict_New();
-    if (self->data == NULL) {
-	Py_DECREF(self);
-	return -1;
-    }
-    /* Untrack the dict mapping oids to objects.
-
-    The dict contains uncounted references to ghost objects, so it
-    isn't safe for GC to visit it.  If GC finds an object with more
-    referents than refcounts, it will die with an assertion failure.
-
-    When the cache participates in GC, it will need to traverse the
-    objects in the doubly-linked list, which will account for all the
-    non-ghost objects.
-    */
-    PyObject_GC_UnTrack((void *)self->data);
-    self->jar = jar;
-    Py_INCREF(jar);
-    self->cache_size = cache_size;
-    self->non_ghost_count = 0;
-    self->klass_count = 0;
-    self->cache_drain_resistance = 0;
-    self->ring_lock = 0;
-    self->ring_home.r_next = &self->ring_home;
-    self->ring_home.r_prev = &self->ring_home;
-    return 0;
-}
-
-static void
-cc_dealloc(ccobject *self)
-{
-    Py_XDECREF(self->data);
-    Py_XDECREF(self->jar);
-    PyObject_GC_Del(self);
-}
-
-static int
-cc_clear(ccobject *self)
-{
-    Py_ssize_t pos = 0;
-    PyObject *k, *v;
-    /* Clearing the cache is delicate.
-
-    A non-ghost object will show up in the ring and in the dict.  If
-    we deallocate the dict before clearing the ring, the GC will
-    decref each object in the dict.  Since the dict references are
-    uncounted, this will lead to objects having negative refcounts.
-
-    Freeing the non-ghost objects should eliminate many objects from
-    the cache, but there may still be ghost objects left.  It's
-    not safe to decref the dict until it's empty, so we need to manually
-    clear those out of the dict, too.  We accomplish that by replacing
-    all the ghost objects with None.
-    */
-
-    /* We don't need to lock the ring, because the cache is unreachable.
-    It should be impossible for anyone to be modifying the cache.
-    */
-    assert(! self->ring_lock);
-
-    while (self->ring_home.r_next != &self->ring_home) {
-	CPersistentRing *here = self->ring_home.r_next;
-	cPersistentObject *o = OBJECT_FROM_RING(self, here);
-
-	if (o->cache) {
-	    Py_INCREF(o); /* account for uncounted reference */
-	    if (PyDict_DelItem(self->data, o->oid) < 0)
-		return -1;
-	}
-	o->cache = NULL;
-	Py_DECREF(self);
-	self->ring_home.r_next = here->r_next;
-	o->ring.r_prev = NULL;
-	o->ring.r_next = NULL;
-	Py_DECREF(o);
-	here = here->r_next;
-    }
-
-    Py_XDECREF(self->jar);
-
-    while (PyDict_Next(self->data, &pos, &k, &v)) {
-	Py_INCREF(v);
-	if (PyDict_SetItem(self->data, k, Py_None) < 0)
-	    return -1;
-    }
-    Py_XDECREF(self->data);
-    self->data = NULL;
-    self->jar = NULL;
-    return 0;
-}
-
-static int
-cc_traverse(ccobject *self, visitproc visit, void *arg)
-{
-    int err;
-    CPersistentRing *here;
-
-    /* If we're in the midst of cleaning up old objects, the ring contains
-     * assorted junk we must not pass on to the visit() callback.  This
-     * should be rare (our cleanup code would need to have called back
-     * into Python, which in turn triggered Python's gc).  When it happens,
-     * simply don't chase any pointers.  The cache will appear to be a
-     * source of external references then, and at worst we miss cleaning
-     * up a dead cycle until the next time Python's gc runs.
-     */
-    if (self->ring_lock)
-    	return 0;
-
-#define VISIT(SLOT) \
-    if (SLOT) { \
-	err = visit((PyObject *)(SLOT), arg); \
-	if (err) \
-		     return err; \
-    }
-
-    VISIT(self->jar);
-
-    here = self->ring_home.r_next;
-
-    /* It is possible that an object is traversed after it is cleared.
-       In that case, there is no ring.
-    */
-    if (!here)
-	return 0;
-
-    while (here != &self->ring_home) {
-	cPersistentObject *o = OBJECT_FROM_RING(self, here);
-	VISIT(o);
-	here = here->r_next;
-    }
-#undef VISIT
-
-    return 0;
-}
-
-static int
-cc_length(ccobject *self)
-{
-    return PyObject_Length(self->data);
-}
-
-static PyObject *
-cc_subscript(ccobject *self, PyObject *key)
-{
-    PyObject *r;
-
-    r = PyDict_GetItem(self->data, key);
-    if (r == NULL) {
-	PyErr_SetObject(PyExc_KeyError, key);
-	return NULL;
-    }
-    Py_INCREF(r);
-
-    return r;
-}
-
-static int
-cc_add_item(ccobject *self, PyObject *key, PyObject *v)
-{
-    int result;
-    PyObject *oid, *object_again, *jar;
-    cPersistentObject *p;
-
-    /* Sanity check the value given to make sure it is allowed in the cache */
-    if (PyType_Check(v)) {
-        /* It's a persistent class, such as a ZClass. That's ok. */
-    }
-    else if (v->ob_type->tp_basicsize < sizeof(cPersistentObject)) {
-        /* If it's not an instance of a persistent class (i.e., Python
-	   classes that derive from persistent.Persistent, BTrees,
-	   etc), report an error.
-
-	   TODO:  checking sizeof() seems a poor test.
-	*/
-	PyErr_SetString(PyExc_TypeError,
-			"Cache values must be persistent objects.");
-	return -1;
-    }
-
-    /* Can't access v->oid directly because the object might be a
-     *  persistent class.
-     */
-    oid = PyObject_GetAttr(v, py__p_oid);
-    if (oid == NULL)
-	return -1;
-    if (! PyString_Check(oid)) {
-        PyErr_Format(PyExc_TypeError,
-                     "Cached object oid must be a string, not a %s",
-		     oid->ob_type->tp_name);
-        Py_DECREF(oid);
-	return -1;
-    }
-
-    /*  we know they are both strings.
-     *  now check if they are the same string.
-     */
-    result = PyObject_Compare(key, oid);
-    if (PyErr_Occurred()) {
-	Py_DECREF(oid);
-	return -1;
-    }
-    Py_DECREF(oid);
-    if (result) {
-	PyErr_SetString(PyExc_ValueError, "Cache key does not match oid");
-	return -1;
-    }
-
-    /* useful sanity check, but not strictly an invariant of this class */
-    jar = PyObject_GetAttr(v, py__p_jar);
-    if (jar == NULL)
-        return -1;
-    if (jar==Py_None) {
-        Py_DECREF(jar);
-        PyErr_SetString(PyExc_ValueError,
-                        "Cached object jar missing");
-	return -1;
-    }
-    Py_DECREF(jar);
-
-    object_again = PyDict_GetItem(self->data, key);
-    if (object_again) {
-	if (object_again != v) {
-	    PyErr_SetString(PyExc_ValueError,
-		    "A different object already has the same oid");
-	    return -1;
-	} else {
-	    /* re-register under the same oid - no work needed */
-	    return 0;
-	}
-    }
-
-    if (PyType_Check(v)) {
-	if (PyDict_SetItem(self->data, key, v) < 0)
-	    return -1;
-	self->klass_count++;
-	return 0;
-    } else {
-	PerCache *cache = ((cPersistentObject *)v)->cache;
-	if (cache) {
-	    if (cache != (PerCache *)self)
-		/* This object is already in a different cache. */
-		PyErr_SetString(PyExc_ValueError,
-				"Cache values may only be in one cache.");
-	    return -1;
-	}
-	/* else:
-
-	   This object is already one of ours, which is ok.  It
-	   would be very strange if someone was trying to register
-	   the same object under a different key.
-	*/
-    }
-
-    if (PyDict_SetItem(self->data, key, v) < 0)
-	return -1;
-    /* the dict should have a borrowed reference */
-    Py_DECREF(v);
-
-    p = (cPersistentObject *)v;
-    Py_INCREF(self);
-    p->cache = (PerCache *)self;
-    if (p->state >= 0) {
-	/* insert this non-ghost object into the ring just
-	   behind the home position. */
-	self->non_ghost_count++;
-	ring_add(&self->ring_home, &p->ring);
-	/* this list should have a new reference to the object */
-	Py_INCREF(v);
-    }
-    return 0;
-}
-
-static int
-cc_del_item(ccobject *self, PyObject *key)
-{
-    PyObject *v;
-    cPersistentObject *p;
-
-    /* unlink this item from the ring */
-    v = PyDict_GetItem(self->data, key);
-    if (v == NULL) {
-	PyErr_SetObject(PyExc_KeyError, key);
-	return -1;
-    }
-
-    if (PyType_Check(v)) {
-	self->klass_count--;
-    } else {
-	p = (cPersistentObject *)v;
-	if (p->state >= 0) {
-	    self->non_ghost_count--;
-	    ring_del(&p->ring);
-	    /* The DelItem below will account for the reference
-	       held by the list. */
-	} else {
-	    /* This is a ghost object, so we haven't kept a reference
-	       count on it.  For it to have stayed alive this long,
-	       someone else must be keeping a reference to
-	       it. Therefore we need to temporarily give it back a
-	       reference count before calling DelItem below */
-	    Py_INCREF(v);
-	}
-
-	Py_DECREF((PyObject *)p->cache);
-	p->cache = NULL;
-    }
-
-    if (PyDict_DelItem(self->data, key) < 0) {
-	PyErr_SetString(PyExc_RuntimeError,
-			"unexpectedly couldn't remove key in cc_ass_sub");
-	return -1;
-    }
-
-    return 0;
-}
-
-static int
-cc_ass_sub(ccobject *self, PyObject *key, PyObject *v)
-{
-    if (!PyString_Check(key)) {
-	PyErr_Format(PyExc_TypeError,
-                     "cPickleCache key must be a string, not a %s",
-		     key->ob_type->tp_name);
-	return -1;
-    }
-    if (v)
-	return cc_add_item(self, key, v);
-    else
-	return cc_del_item(self, key);
-}
-
-static PyMappingMethods cc_as_mapping = {
-  (inquiry)cc_length,		/*mp_length*/
-  (binaryfunc)cc_subscript,	/*mp_subscript*/
-  (objobjargproc)cc_ass_sub,	/*mp_ass_subscript*/
-};
-
-static PyObject *
-cc_cache_data(ccobject *self, void *context)
-{
-    return PyDict_Copy(self->data);
-}
-
-static PyGetSetDef cc_getsets[] = {
-    {"cache_data", (getter)cc_cache_data},
-    {NULL}
-};
-
-
-static PyMemberDef cc_members[] = {
-    {"cache_size", T_INT, offsetof(ccobject, cache_size)},
-    {"cache_drain_resistance", T_INT,
-     offsetof(ccobject, cache_drain_resistance)},
-    {"cache_non_ghost_count", T_INT, offsetof(ccobject, non_ghost_count), RO},
-    {"cache_klass_count", T_INT, offsetof(ccobject, klass_count), RO},
-    {NULL}
-};
-
-/* This module is compiled as a shared library.  Some compilers don't
-   allow addresses of Python objects defined in other libraries to be
-   used in static initializers here.  The DEFERRED_ADDRESS macro is
-   used to tag the slots where such addresses appear; the module init
-   function must fill in the tagged slots at runtime.  The argument is
-   for documentation -- the macro ignores it.
-*/
-#define DEFERRED_ADDRESS(ADDR) 0
-
-static PyTypeObject Cctype = {
-    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type))
-    0,					/* ob_size */
-    "persistent.PickleCache",		/* tp_name */
-    sizeof(ccobject),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)cc_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    0,					/* tp_as_number */
-    0,					/* tp_as_sequence */
-    &cc_as_mapping,			/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
-    					/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)cc_traverse,		/* tp_traverse */
-    (inquiry)cc_clear,			/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    0,					/* tp_iter */
-    0,					/* tp_iternext */
-    cc_methods,				/* tp_methods */
-    cc_members,				/* tp_members */
-    cc_getsets,				/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    (initproc)cc_init,			/* tp_init */
-};
-
-void
-initcPickleCache(void)
-{
-    PyObject *m;
-
-    Cctype.ob_type = &PyType_Type;
-    Cctype.tp_new = &PyType_GenericNew;
-    if (PyType_Ready(&Cctype) < 0) {
-	return;
-    }
-
-    m = Py_InitModule3("cPickleCache", NULL, cPickleCache_doc_string);
-
-    capi = (cPersistenceCAPIstruct *)PyCObject_Import(
-	"persistent.cPersistence", "CAPI");
-    if (!capi)
-	return;
-    capi->percachedel = (percachedelfunc)cc_oid_unreferenced;
-
-    py__p_changed = PyString_InternFromString("_p_changed");
-    if (!py__p_changed)
-        return;
-    py__p_deactivate = PyString_InternFromString("_p_deactivate");
-    if (!py__p_deactivate)
-        return;
-    py__p_jar = PyString_InternFromString("_p_jar");
-    if (!py__p_jar)
-        return;
-    py__p_oid = PyString_InternFromString("_p_oid");
-    if (!py__p_oid)
-        return;
-
-    if (PyModule_AddStringConstant(m, "cache_variant", "stiff/c") < 0)
-	return;
-
-    /* This leaks a reference to Cctype, but it doesn't matter. */
-    if (PyModule_AddObject(m, "PickleCache", (PyObject *)&Cctype) < 0)
-	return;
-}

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/persistent/cPickleCache.c)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/cPickleCache.c	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,1157 @@
+ /*****************************************************************************
+
+  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+  All Rights Reserved.
+
+  This software is subject to the provisions of the Zope Public License,
+  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+  FOR A PARTICULAR PURPOSE
+
+ ****************************************************************************/
+
+/*
+
+Objects are stored under three different regimes:
+
+Regime 1: Persistent Classes
+
+Persistent Classes are part of ZClasses. They are stored in the
+self->data dictionary, and are never garbage collected.
+
+The klass_items() method returns a sequence of (oid,object) tuples for
+every Persistent Class, which should make it possible to implement
+garbage collection in Python if necessary.
+
+Regime 2: Ghost Objects
+
+There is no benefit to keeping a ghost object which has no external
+references; therefore, a weak reference scheme is used to ensure that
+ghost objects are removed from memory as soon as possible, when the
+last external reference is lost.
+
+Ghost objects are stored in the self->data dictionary. Normally a
+dictionary keeps a strong reference on its values; however, this
+reference count is 'stolen'.
+
+This weak reference scheme leaves a dangling reference, in the
+dictionary, when the last external reference is lost. To clean up this
+dangling reference the persistent object dealloc function calls
+self->cache->_oid_unreferenced(self->oid). The cache looks up the oid
+in the dictionary, ensures it points to an object whose reference
+count is zero, then removes it from the dictionary. Before removing
+the object from the dictionary it must temporarily resurrect the
+object in much the same way that class instances are resurrected
+before their __del__ is called.
+
+Since ghost objects are stored under a different regime to non-ghost
+objects, an extra ghostify function in cPersistenceAPI replaces
+self->state=GHOST_STATE assignments that were common in other
+persistent classes (such as BTrees).
+
+Regime 3: Non-Ghost Objects
+
+Non-ghost objects are stored in two data structures: the dictionary
+mapping oids to objects and a doubly-linked list that encodes the
+order in which the objects were accessed.  The dictionary reference is
+borrowed, as it is for ghosts.  The list reference is a new reference;
+the list stores recently used objects, even if they are otherwise
+unreferenced, to avoid loading the object from the database again.
+
+The doubly-linked-list nodes contain next and previous pointers linking
+together the cache and all non-ghost persistent objects.
+
+The node embedded in the cache is the home position. On every
+attribute access a non-ghost object will relink itself just behind the
+home position in the ring. Objects accessed least recently will
+eventually find themselves positioned after the home position.
+
+Occasionally other nodes are temporarily inserted in the ring as
+position markers. The cache contains a ring_lock flag which must be
+set and unset before and after doing so. Only if the flag is unset can
+the cache assume that all nodes are either its own home node, or nodes
+from persistent objects. This assumption is useful during the garbage
+collection process.
+
+The number of non-ghost objects is counted in self->non_ghost_count.
+The garbage collection process consists of traversing the ring, and
+deactivating (that is, turning into a ghost) every object until
+self->non_ghost_count is down to the target size, or until it
+reaches the home position again.
+
+Note that objects in the sticky or changed states are still kept in
+the ring; however, they cannot be deactivated. The garbage collection
+process must skip such objects, rather than deactivating them.
+
+*/
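
[Editorial note: the "stolen reference" bookkeeping of Regime 2 is easier to
see in miniature. The toy model below uses hypothetical names (Obj, table,
oid_unreferenced) and none of the CPython mechanics -- in particular none of
the resurrection dance that cc_oid_unreferenced must perform -- but it shows
the essential shape: a table entry that owns no reference, plus a dealloc
hook that clears the dangling slot.]

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Obj Obj;
    struct Obj {
        int refcnt;
        Obj **slot;      /* the table entry that points at us, uncounted */
    };

    /* Analogue of cc_oid_unreferenced: repair the dangling table entry. */
    static void oid_unreferenced(Obj *o)
    {
        *o->slot = NULL;
    }

    static void obj_decref(Obj *o)
    {
        if (--o->refcnt == 0) {
            oid_unreferenced(o);   /* table still points at us; clear it */
            free(o);
        }
    }

    int main(void)
    {
        Obj *table[1];                /* stand-in for the oid -> object dict */
        Obj *o = malloc(sizeof *o);
        o->refcnt = 1;                /* one external reference */
        o->slot = &table[0];
        table[0] = o;                 /* stored WITHOUT taking a reference */
        obj_decref(o);                /* last external reference dropped */
        assert(table[0] == NULL);     /* no dangling entry remains */
        printf("ok\n");
        return 0;
    }
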
+
+static char cPickleCache_doc_string[] =
+"Defines the PickleCache used by ZODB Connection objects.\n"
+"\n"
+"$Id$\n";
+
+#define DONT_USE_CPERSISTENCECAPI
+#include "cPersistence.h"
+#include "structmember.h"
+#include <time.h>
+#include <stddef.h>
+#undef Py_FindMethod
+
+/* Python string objects to speed lookups; set by module init. */
+static PyObject *py__p_changed;
+static PyObject *py__p_deactivate;
+static PyObject *py__p_jar;
+static PyObject *py__p_oid;
+
+static cPersistenceCAPIstruct *capi;
+
+/* This object is the pickle cache.  The CACHE_HEAD macro guarantees
+   that layout of this struct is the same as the start of
+   ccobject_head in cPersistence.c */
+typedef struct {
+    CACHE_HEAD
+    int klass_count;                     /* count of persistent classes */
+    PyObject *data;                      /* oid -> object dict */
+    PyObject *jar;                       /* Connection object */
+    int cache_size;                      /* target number of items in cache */
+    PY_LONG_LONG cache_size_bytes;       /* target total estimated size of items in cache */
+
+    /* Most of the time the ring contains only:
+       * many nodes corresponding to persistent objects
+       * one 'home' node from the cache.
+    In some cases it is handy to temporarily add other types
+    of node into the ring as placeholders. 'ring_lock' is a boolean
+    indicating that someone has already done this. Currently this
+    is only used by the garbage collection code. */
+
+    int ring_lock;
+
+    /* 'cache_drain_resistance' controls how quickly the cache size will drop
+    when it is smaller than the configured size. A value of zero means it will
+    not drop below the configured size (suitable for most caches). Otherwise,
+    it will remove cache_non_ghost_count/cache_drain_resistance items from
+    the cache every time (suitable for rarely used caches, such as those
+    associated with Zope versions). */
+
+    int cache_drain_resistance;
+
+} ccobject;
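
[Editorial note: to make the drain-resistance arithmetic concrete, here is a
small standalone computation of the per-pass target that cc_incrgc (further
down) derives from these fields; the numbers are illustrative, not from this
changeset.]

    #include <stdio.h>

    int main(void)
    {
        int non_ghost_count = 400;        /* current cache population */
        int cache_drain_resistance = 100;
        /* Same formula as cc_incrgc: shrink by roughly 1/resistance
           of the population per pass, plus one. */
        int target = non_ghost_count - 1
                     - non_ghost_count / cache_drain_resistance;
        printf("target after one pass: %d\n", target);   /* prints 395 */
        return 0;
    }
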
+
+static int cc_ass_sub(ccobject *self, PyObject *key, PyObject *v);
+
+/* ---------------------------------------------------------------- */
+
+#define OBJECT_FROM_RING(SELF, HERE) \
+    ((cPersistentObject *)(((char *)(HERE)) - offsetof(cPersistentObject, ring)))
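
[Editorial note: OBJECT_FROM_RING is the classic container-of idiom --
subtract the offset of the embedded ring member to recover the enclosing
object. A minimal standalone sketch of the idiom, with hypothetical
Item/Ring names rather than the ZODB structs:]

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Ring { struct Ring *r_next, *r_prev; } Ring;

    typedef struct {
        int payload;
        Ring ring;        /* embedded node, like cPersistentObject.ring */
    } Item;

    /* Recover the Item from a pointer to its embedded ring node. */
    #define ITEM_FROM_RING(node) \
        ((Item *)((char *)(node) - offsetof(Item, ring)))

    int main(void)
    {
        Item it = { 42, { NULL, NULL } };
        Ring *node = &it.ring;        /* all the ring hands us is the node */
        assert(ITEM_FROM_RING(node) == &it);
        printf("%d\n", ITEM_FROM_RING(node)->payload);   /* prints 42 */
        return 0;
    }
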
+
+/* Insert self into the ring, following after. */
+static void
+insert_after(CPersistentRing *self, CPersistentRing *after)
+{
+    assert(self != NULL);
+    assert(after != NULL);
+    self->r_prev = after;
+    self->r_next = after->r_next;
+    after->r_next->r_prev = self;
+    after->r_next = self;
+}
+
+/* Remove self from the ring. */
+static void
+unlink_from_ring(CPersistentRing *self)
+{
+    assert(self != NULL);
+    self->r_prev->r_next = self->r_next;
+    self->r_next->r_prev = self->r_prev;
+}
+
+static int
+scan_gc_items(ccobject *self, int target, PY_LONG_LONG target_bytes)
+{
+    /* This function must only be called with the ring lock held,
+       because it places non-object placeholders in the ring.
+    */
+    cPersistentObject *object;
+    CPersistentRing *here;
+    CPersistentRing before_original_home;
+    int result = -1;   /* guilty until proved innocent */
+
+    /* Scan the ring, from least to most recently used, deactivating
+     * up-to-date objects, until we either find the ring_home again or
+     * or we've ghosted enough objects to reach the target size.
+     * Tricky:  __getattr__ and __del__ methods can do anything, and in
+     * particular if we ghostify an object with a __del__ method, that method
+     * can load the object again, putting it back into the MRU part of the
+     * ring.  Waiting to find ring_home again can thus cause an infinite
+     * loop (Collector #1208).  So before_original_home records the MRU
+     * position we start with, and we stop the scan when we reach that.
+     */
+    insert_after(&before_original_home, self->ring_home.r_prev);
+    here = self->ring_home.r_next;   /* least recently used object */
+    while (here != &before_original_home &&
+	   (self->non_ghost_count > target
+	    || (target_bytes && self->total_estimated_size > target_bytes)
+	    )
+	   ) {
+	assert(self->ring_lock);
+	assert(here != &self->ring_home);
+
+        /* At this point we know that the ring only contains nodes
+	   from persistent objects, plus our own home node.  We know
+	   this because the ring lock is held.  We can safely assume
+	   the current ring node is a persistent object now that we know it
+	   is not the home */
+        object = OBJECT_FROM_RING(self, here);
+
+        if (object->state == cPersistent_UPTODATE_STATE) {
+            CPersistentRing placeholder;
+            PyObject *method;
+            PyObject *temp;
+            int error_occurred = 0;
+            /* deactivate it. This is the main memory saver. */
+
+            /* Add a placeholder, a dummy node in the ring.  We need
+	       to do this to mark our position in the ring.  It is
+	       possible that the PyObject_GetAttr() call below will
+	       invoke a __getattr__() hook in Python.  Also possible
+	       that deactivation will lead to a __del__ method call.
+	       So another thread might run, and mutate the ring as a side
+	       effect of object accesses.  There's no predicting where
+	       in the ring here->next will point after that.  The
+	       placeholder won't move as a side effect of calling Python
+	       code.
+	    */
+            insert_after(&placeholder, here);
+	    method = PyObject_GetAttr((PyObject *)object, py__p_deactivate);
+	    if (method == NULL)
+	        error_occurred = 1;
+	    else {
+ 		temp = PyObject_CallObject(method, NULL);
+                Py_DECREF(method);
+	        if (temp == NULL)
+	            error_occurred = 1;
+	    }
+
+            here = placeholder.r_next;
+            unlink_from_ring(&placeholder);
+            if (error_occurred)
+                goto Done;
+        }
+        else
+            here = here->r_next;
+    }
+    result = 0;
+ Done:
+    unlink_from_ring(&before_original_home);
+    return result;
+}
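
[Editorial note: the placeholder technique in scan_gc_items generalizes to
any intrusive ring whose visit callbacks may relink nodes: park a
stack-allocated dummy node after the current position, make the call, then
resume from the dummy. A self-contained sketch, assuming nothing from
cPersistence.h:]

    #include <stdio.h>

    typedef struct Ring { struct Ring *next, *prev; } Ring;

    static void ring_insert_after(Ring *node, Ring *after)
    {
        node->prev = after;
        node->next = after->next;
        after->next->prev = node;
        after->next = node;
    }

    static void ring_unlink(Ring *node)
    {
        node->prev->next = node->next;
        node->next->prev = node->prev;
    }

    /* Visit every node even if visit() relinks the current one: the
       stack-allocated placeholder marks where to resume. */
    static void for_each_safely(Ring *home, void (*visit)(Ring *))
    {
        Ring *here = home->next;
        while (here != home) {
            Ring placeholder;
            ring_insert_after(&placeholder, here);
            visit(here);                  /* may move or unlink 'here' */
            here = placeholder.next;      /* resume after the marker */
            ring_unlink(&placeholder);
        }
    }

    static void show(Ring *node)
    {
        printf("visited %p\n", (void *)node);
    }

    int main(void)
    {
        Ring home = { &home, &home };
        Ring a, b;
        ring_insert_after(&a, &home);     /* ring: home, a */
        ring_insert_after(&b, &a);        /* ring: home, a, b */
        for_each_safely(&home, show);     /* visits a, then b */
        return 0;
    }
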
+
+static PyObject *
+lockgc(ccobject *self, int target_size, PY_LONG_LONG target_size_bytes)
+{
+    /* This is thread-safe because of the GIL, and there's nothing
+     * in between checking the ring_lock and acquiring it that calls back
+     * into Python.
+     */
+    if (self->ring_lock) {
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+
+    self->ring_lock = 1;
+    if (scan_gc_items(self, target_size, target_size_bytes) < 0) {
+        self->ring_lock = 0;
+        return NULL;
+    }
+    self->ring_lock = 0;
+
+    Py_INCREF(Py_None);
+    return Py_None;
+}
+
+static PyObject *
+cc_incrgc(ccobject *self, PyObject *args)
+{
+    int obsolete_arg = -999;
+    int starting_size = self->non_ghost_count;
+    int target_size = self->cache_size;
+    PY_LONG_LONG target_size_bytes = self->cache_size_bytes;
+
+    if (self->cache_drain_resistance >= 1) {
+        /* This cache will gradually drain down to a small size. Check
+           a (small) number of objects proportional to the current size */
+
+        int target_size_2 = (starting_size - 1
+			     - starting_size / self->cache_drain_resistance);
+        if (target_size_2 < target_size)
+            target_size = target_size_2;
+    }
+
+
+    if (!PyArg_ParseTuple(args, "|i:incrgc", &obsolete_arg))
+	return NULL;
+
+    if (obsolete_arg != -999
+        &&
+        (PyErr_Warn(PyExc_DeprecationWarning,
+                    "No argument expected")
+         < 0))
+        return NULL;
+
+    return lockgc(self, target_size, target_size_bytes);
+}
+
+static PyObject *
+cc_full_sweep(ccobject *self, PyObject *args)
+{
+    int dt = -999;
+
+    /* TODO:  This should be deprecated;  */
+
+    if (!PyArg_ParseTuple(args, "|i:full_sweep", &dt))
+	return NULL;
+    if (dt == -999)
+        return lockgc(self, 0, 0);
+    else
+	return cc_incrgc(self, args);
+}
+
+static PyObject *
+cc_minimize(ccobject *self, PyObject *args)
+{
+    int ignored = -999;
+
+    if (!PyArg_ParseTuple(args, "|i:minimize", &ignored))
+	return NULL;
+
+    if (ignored != -999
+        &&
+        (PyErr_Warn(PyExc_DeprecationWarning,
+                    "No argument expected")
+         < 0))
+        return NULL;
+
+    return lockgc(self, 0, 0);
+}
+
+static int
+_invalidate(ccobject *self, PyObject *key)
+{
+    static PyObject *_p_invalidate = NULL;
+    PyObject *meth, *v;
+
+    v = PyDict_GetItem(self->data, key);
+    if (v == NULL)
+	return 0;
+
+    if (_p_invalidate == NULL)
+      {
+	_p_invalidate = PyString_InternFromString("_p_invalidate");
+	if (_p_invalidate == NULL)
+          {
+	    /* It doesn't make any sense to ignore this error, but
+	       the caller ignores all errors.
+
+               TODO: and why does it do that? This should be fixed
+	    */
+	    return -1;
+          }
+      }
+
+    if (v->ob_refcnt <= 1 && PyType_Check(v)) {
+      /* This looks wrong, but it isn't. We use strong references to types
+         because they don't have the ring members.
+
+         The result is that we *never* remove classes unless
+         they are modified.  We can fix this by using weakrefs uniformly.
+      */
+      self->klass_count--;
+      return PyDict_DelItem(self->data, key);
+    }
+
+    meth = PyObject_GetAttr(v, _p_invalidate);
+    if (meth == NULL)
+      return -1;
+
+    v = PyObject_CallObject(meth, NULL);
+    Py_DECREF(meth);
+    if (v == NULL)
+	return -1;
+    /* Release the call's result; otherwise it leaks (usually Py_None). */
+    Py_DECREF(v);
+    return 0;
+}
+
+static PyObject *
+cc_invalidate(ccobject *self, PyObject *inv)
+{
+  PyObject *key, *v;
+  Py_ssize_t i = 0;
+
+  if (PyDict_Check(inv))
+    {
+      while (PyDict_Next(inv, &i, &key, &v))
+        {
+	  if (_invalidate(self, key) < 0)
+            return NULL;
+        }
+      PyDict_Clear(inv);
+    }
+  else {
+      if (PyString_Check(inv))
+        {
+	  if (_invalidate(self, inv) < 0)
+            return NULL;
+        }
+      else {
+	  int l, r;
+
+	  l = PyObject_Length(inv);
+	  if (l < 0)
+	      return NULL;
+	  for (i=l; --i >= 0; ) {
+	      key = PySequence_GetItem(inv, i);
+	      if (!key)
+		  return NULL;
+	      r = _invalidate(self, key);
+	      Py_DECREF(key);
+              if (r < 0)
+                return NULL;
+	  }
+	  /* Dubious:  modifying the input may be an unexpected side effect. */
+	  PySequence_DelSlice(inv, 0, l);
+      }
+  }
+
+  Py_INCREF(Py_None);
+  return Py_None;
+}
+
+static PyObject *
+cc_get(ccobject *self, PyObject *args)
+{
+    PyObject *r, *key, *d = NULL;
+
+    if (!PyArg_ParseTuple(args, "O|O:get", &key, &d))
+	return NULL;
+
+    r = PyDict_GetItem(self->data, key);
+    if (!r) {
+	if (d)
+	    r = d;
+	else
+	    r = Py_None;
+    }
+    Py_INCREF(r);
+    return r;
+}
+
+static PyObject *
+cc_items(ccobject *self)
+{
+    return PyObject_CallMethod(self->data, "items", "");
+}
+
+static PyObject *
+cc_klass_items(ccobject *self)
+{
+    PyObject *l,*k,*v;
+    Py_ssize_t p = 0;
+
+    l = PyList_New(0);
+    if (l == NULL)
+	return NULL;
+
+    while (PyDict_Next(self->data, &p, &k, &v)) {
+        if(PyType_Check(v)) {
+	    v = Py_BuildValue("OO", k, v);
+	    if (v == NULL) {
+		Py_DECREF(l);
+		return NULL;
+	    }
+	    if (PyList_Append(l, v) < 0) {
+		Py_DECREF(v);
+		Py_DECREF(l);
+		return NULL;
+	    }
+	    Py_DECREF(v);
+        }
+    }
+
+    return l;
+}
+
+static PyObject *
+cc_debug_info(ccobject *self)
+{
+    PyObject *l,*k,*v;
+    Py_ssize_t p = 0;
+
+    l = PyList_New(0);
+    if (l == NULL)
+	return NULL;
+
+    while (PyDict_Next(self->data, &p, &k, &v))
+      {
+        if (v->ob_refcnt <= 0)
+          v = Py_BuildValue("Oi", k, v->ob_refcnt);
+
+        else if (! PyType_Check(v) &&
+                 (v->ob_type->tp_basicsize >= sizeof(cPersistentObject))
+                 )
+          v = Py_BuildValue("Oisi",
+                            k, v->ob_refcnt, v->ob_type->tp_name,
+                            ((cPersistentObject*)v)->state);
+        else
+          v = Py_BuildValue("Ois", k, v->ob_refcnt, v->ob_type->tp_name);
+
+        if (v == NULL)
+          goto err;
+
+        if (PyList_Append(l, v) < 0)
+          goto err;
+      }
+
+    return l;
+
+ err:
+    Py_DECREF(l);
+    return NULL;
+
+}
+
+static PyObject *
+cc_lru_items(ccobject *self)
+{
+    PyObject *l;
+    CPersistentRing *here;
+
+    if (self->ring_lock) {
+	/* When the ring lock is held, we have no way of knowing which
+	   ring nodes belong to persistent objects, and which are
+	   placeholders. */
+        PyErr_SetString(PyExc_ValueError,
+		".lru_items() is unavailable during garbage collection");
+        return NULL;
+    }
+
+    l = PyList_New(0);
+    if (l == NULL)
+	return NULL;
+
+    here = self->ring_home.r_next;
+    while (here != &self->ring_home) {
+        PyObject *v;
+        cPersistentObject *object = OBJECT_FROM_RING(self, here);
+
+        if (object == NULL) {
+            Py_DECREF(l);
+            return NULL;
+        }
+	v = Py_BuildValue("OO", object->oid, object);
+	if (v == NULL) {
+            Py_DECREF(l);
+            return NULL;
+	}
+	if (PyList_Append(l, v) < 0) {
+	    Py_DECREF(v);
+            Py_DECREF(l);
+            return NULL;
+	}
+        Py_DECREF(v);
+        here = here->r_next;
+    }
+
+    return l;
+}
+
+static void
+cc_oid_unreferenced(ccobject *self, PyObject *oid)
+{
+    /* This is called by the persistent object deallocation function
+       when the reference count on a persistent object reaches
+       zero. We need to fix up our dictionary; its reference is now
+       dangling because we stole its reference count. Be careful to
+       not release the global interpreter lock until this is
+       complete. */
+
+    PyObject *v;
+
+    /* If the cache has been cleared by GC, data will be NULL. */
+    if (!self->data)
+	return;
+
+    v = PyDict_GetItem(self->data, oid);
+    assert(v);
+    assert(v->ob_refcnt == 0);
+    /* Need to be very hairy here because a dictionary is about
+       to decref an already deleted object.
+    */
+
+#ifdef Py_TRACE_REFS
+    /* This is called from the deallocation function after the
+       interpreter has untracked the reference.  Track it again.
+     */
+    _Py_NewReference(v);
+    /* Don't increment total refcount as a result of the
+       shenanigans played in this function.  The _Py_NewReference()
+       call above creates artificial references to v.
+    */
+    _Py_RefTotal--;
+    assert(v->ob_type);
+#else
+    Py_INCREF(v);
+#endif
+    assert(v->ob_refcnt == 1);
+    /* Increment the refcount again, because delitem is going to
+       DECREF it.  If its refcount reached zero again, we'd call back to
+       the dealloc function that called us.
+    */
+    Py_INCREF(v);
+
+    /* TODO:  Should we call _Py_ForgetReference() on error exit? */
+    if (PyDict_DelItem(self->data, oid) < 0)
+	return;
+    Py_DECREF((ccobject *)((cPersistentObject *)v)->cache);
+    ((cPersistentObject *)v)->cache = NULL;
+
+    assert(v->ob_refcnt == 1);
+
+    /* Undo the temporary resurrection.
+       Don't DECREF the object, because this function is called from
+       the object's dealloc function. If the refcnt reaches zero, it
+       will all be invoked recursively.
+     */
+    _Py_ForgetReference(v);
+}
+
+static PyObject *
+cc_ringlen(ccobject *self)
+{
+    CPersistentRing *here;
+    int c = 0;
+
+    for (here = self->ring_home.r_next; here != &self->ring_home;
+	 here = here->r_next)
+	c++;
+    return PyInt_FromLong(c);
+}
+
+static PyObject *
+cc_update_object_size_estimation(ccobject *self, PyObject *args)
+{
+    PyObject *oid;
+    cPersistentObject *v;
+    unsigned int new_size;
+    if (!PyArg_ParseTuple(args, "OI:update_object_size_estimation", &oid, &new_size))
+	return NULL;
+    /* Note: reference borrowed */
+    v = (cPersistentObject *)PyDict_GetItem(self->data, oid);
+    if (v) {
+        /* We know this object -- update our "total_estimated_size".
+           We must only update when the object is in the ring.
+	*/
+        if (v->ring.r_next) {
+            self->total_estimated_size += new_size - v->estimated_size;
+	    /* we do this in "Connection" as we need it even when the
+	       object is not in the cache (or not in the ring)
+	    */
+	    /* v->estimated_size = new_size; */
+	}
+    }
+    Py_RETURN_NONE;
+ }
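
[Editorial note: the running-total update above applies only the delta
between the new estimate and the one previously recorded for the object. A
small sketch with made-up numbers; the explicit casts reflect what one would
want given that both sizes are unsigned and the new estimate may be smaller
than the old one.]

    #include <stdio.h>

    int main(void)
    {
        long long total_estimated_size = 10000;   /* running cache total */
        unsigned int old_estimate = 512;          /* previously recorded */
        unsigned int new_estimate = 768;          /* freshly reported size */

        /* Apply only the delta; casting first avoids unsigned wraparound
           when new_estimate < old_estimate. */
        total_estimated_size +=
            (long long)new_estimate - (long long)old_estimate;
        printf("%lld\n", total_estimated_size);   /* prints 10256 */
        return 0;
    }
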
+
+
+static struct PyMethodDef cc_methods[] = {
+    {"items", (PyCFunction)cc_items, METH_NOARGS,
+     "Return list of oid, object pairs for all items in cache."},
+    {"lru_items", (PyCFunction)cc_lru_items, METH_NOARGS,
+     "List (oid, object) pairs from the lru list, as 2-tuples."},
+    {"klass_items", (PyCFunction)cc_klass_items, METH_NOARGS,
+     "List (oid, object) pairs of cached persistent classes."},
+    {"full_sweep", (PyCFunction)cc_full_sweep, METH_VARARGS,
+     "full_sweep() -- Perform a full sweep of the cache."},
+    {"minimize",	(PyCFunction)cc_minimize, METH_VARARGS,
+     "minimize([ignored]) -- Remove as many objects as possible\n\n"
+     "Ghostify all objects that are not modified.  Takes an optional\n"
+     "argument, but ignores it."},
+    {"incrgc", (PyCFunction)cc_incrgc, METH_VARARGS,
+     "incrgc() -- Perform incremental garbage collection\n\n"
+     "This method has been deprecated!\n"
+     "Some other implementations support an optional parameter 'n' which\n"
+     "indicates a repetition count; this value is ignored."},
+    {"invalidate", (PyCFunction)cc_invalidate, METH_O,
+     "invalidate(oids) -- invalidate one, many, or all ids"},
+    {"get", (PyCFunction)cc_get, METH_VARARGS,
+     "get(key [, default]) -- get an item, or a default"},
+    {"ringlen", (PyCFunction)cc_ringlen, METH_NOARGS,
+     "ringlen() -- Returns number of non-ghost items in cache."},
+    {"debug_info", (PyCFunction)cc_debug_info, METH_NOARGS,
+     "debug_info() -- Returns debugging data about objects in the cache."},
+    {"update_object_size_estimation",
+     (PyCFunction)cc_update_object_size_estimation,
+     METH_VARARGS,
+     "update_object_size_estimation(oid, new_size) -- update the cache's size estimation for *oid* (if this is known to the cache)."},
+    {NULL, NULL}		/* sentinel */
+};
+
+static int
+cc_init(ccobject *self, PyObject *args, PyObject *kwds)
+{
+    int cache_size = 100;
+    PY_LONG_LONG cache_size_bytes = 0;
+    PyObject *jar;
+
+    if (!PyArg_ParseTuple(args, "O|iL", &jar, &cache_size, &cache_size_bytes))
+	return -1;
+
+    self->jar = NULL;
+    self->data = PyDict_New();
+    if (self->data == NULL) {
+	Py_DECREF(self);
+	return -1;
+    }
+    /* Untrack the dict mapping oids to objects.
+
+    The dict contains uncounted references to ghost objects, so it
+    isn't safe for GC to visit it.  If GC finds an object with more
+    referents than refcounts, it will die with an assertion failure.
+
+    When the cache participates in GC, it will need to traverse the
+    objects in the doubly-linked list, which will account for all the
+    non-ghost objects.
+    */
+    PyObject_GC_UnTrack((void *)self->data);
+    self->jar = jar;
+    Py_INCREF(jar);
+    self->cache_size = cache_size;
+    self->cache_size_bytes = cache_size_bytes;
+    self->non_ghost_count = 0;
+    self->total_estimated_size = 0;
+    self->klass_count = 0;
+    self->cache_drain_resistance = 0;
+    self->ring_lock = 0;
+    self->ring_home.r_next = &self->ring_home;
+    self->ring_home.r_prev = &self->ring_home;
+    return 0;
+}
+
+static void
+cc_dealloc(ccobject *self)
+{
+    Py_XDECREF(self->data);
+    Py_XDECREF(self->jar);
+    PyObject_GC_Del(self);
+}
+
+static int
+cc_clear(ccobject *self)
+{
+    Py_ssize_t pos = 0;
+    PyObject *k, *v;
+    /* Clearing the cache is delicate.
+
+    A non-ghost object will show up in the ring and in the dict.  If
+    we deallocate the dict before clearing the ring, the GC will
+    decref each object in the dict.  Since the dict references are
+    uncounted, this will lead to objects having negative refcounts.
+
+    Freeing the non-ghost objects should eliminate many objects from
+    the cache, but there may still be ghost objects left.  It's
+    not safe to decref the dict until it's empty, so we need to manually
+    clear those out of the dict, too.  We accomplish that by replacing
+    all the ghost objects with None.
+    */
+
+    /* We don't need to lock the ring, because the cache is unreachable.
+    It should be impossible for anyone to be modifying the cache.
+    */
+    assert(! self->ring_lock);
+
+    while (self->ring_home.r_next != &self->ring_home) {
+	CPersistentRing *here = self->ring_home.r_next;
+	cPersistentObject *o = OBJECT_FROM_RING(self, here);
+
+	if (o->cache) {
+	    Py_INCREF(o); /* account for uncounted reference */
+	    if (PyDict_DelItem(self->data, o->oid) < 0)
+		return -1;
+	}
+	o->cache = NULL;
+	Py_DECREF(self);
+	self->ring_home.r_next = here->r_next;
+	o->ring.r_prev = NULL;
+	o->ring.r_next = NULL;
+	Py_DECREF(o);
+	here = here->r_next;
+    }
+
+    Py_XDECREF(self->jar);
+
+    while (PyDict_Next(self->data, &pos, &k, &v)) {
+	Py_INCREF(v);
+	if (PyDict_SetItem(self->data, k, Py_None) < 0)
+	    return -1;
+    }
+    Py_XDECREF(self->data);
+    self->data = NULL;
+    self->jar = NULL;
+    return 0;
+}
+
+static int
+cc_traverse(ccobject *self, visitproc visit, void *arg)
+{
+    int err;
+    CPersistentRing *here;
+
+    /* If we're in the midst of cleaning up old objects, the ring contains
+     * assorted junk we must not pass on to the visit() callback.  This
+     * should be rare (our cleanup code would need to have called back
+     * into Python, which in turn triggered Python's gc).  When it happens,
+     * simply don't chase any pointers.  The cache will appear to be a
+     * source of external references then, and at worst we miss cleaning
+     * up a dead cycle until the next time Python's gc runs.
+     */
+    if (self->ring_lock)
+    	return 0;
+
+#define VISIT(SLOT) \
+    if (SLOT) { \
+	err = visit((PyObject *)(SLOT), arg); \
+	if (err) \
+		     return err; \
+    }
+
+    VISIT(self->jar);
+
+    here = self->ring_home.r_next;
+
+    /* It is possible that an object is traversed after it is cleared.
+       In that case, there is no ring.
+    */
+    if (!here)
+	return 0;
+
+    while (here != &self->ring_home) {
+	cPersistentObject *o = OBJECT_FROM_RING(self, here);
+	VISIT(o);
+	here = here->r_next;
+    }
+#undef VISIT
+
+    return 0;
+}
+
+static int
+cc_length(ccobject *self)
+{
+    return PyObject_Length(self->data);
+}
+
+static PyObject *
+cc_subscript(ccobject *self, PyObject *key)
+{
+    PyObject *r;
+
+    r = PyDict_GetItem(self->data, key);
+    if (r == NULL) {
+	PyErr_SetObject(PyExc_KeyError, key);
+	return NULL;
+    }
+    Py_INCREF(r);
+
+    return r;
+}
+
+static int
+cc_add_item(ccobject *self, PyObject *key, PyObject *v)
+{
+    int result;
+    PyObject *oid, *object_again, *jar;
+    cPersistentObject *p;
+
+    /* Sanity check the value given to make sure it is allowed in the cache */
+    if (PyType_Check(v)) {
+        /* It's a persistent class, such as a ZClass. That's ok. */
+    }
+    else if (v->ob_type->tp_basicsize < sizeof(cPersistentObject)) {
+        /* If it's not an instance of a persistent class (i.e., Python
+	   classes that derive from persistent.Persistent, BTrees,
+	   etc), report an error.
+
+	   TODO:  checking sizeof() seems a poor test.
+	*/
+	PyErr_SetString(PyExc_TypeError,
+			"Cache values must be persistent objects.");
+	return -1;
+    }
+
+    /* Can't access v->oid directly because the object might be a
+     *  persistent class.
+     */
+    oid = PyObject_GetAttr(v, py__p_oid);
+    if (oid == NULL)
+	return -1;
+    if (! PyString_Check(oid)) {
+        PyErr_Format(PyExc_TypeError,
+                     "Cached object oid must be a string, not a %s",
+		     oid->ob_type->tp_name);
+        Py_DECREF(oid);
+	return -1;
+    }
+
+    /*  we know they are both strings.
+     *  now check if they are the same string.
+     */
+    result = PyObject_Compare(key, oid);
+    if (PyErr_Occurred()) {
+	Py_DECREF(oid);
+	return -1;
+    }
+    Py_DECREF(oid);
+    if (result) {
+	PyErr_SetString(PyExc_ValueError, "Cache key does not match oid");
+	return -1;
+    }
+
+    /* useful sanity check, but not strictly an invariant of this class */
+    jar = PyObject_GetAttr(v, py__p_jar);
+    if (jar == NULL)
+        return -1;
+    if (jar==Py_None) {
+        Py_DECREF(jar);
+        PyErr_SetString(PyExc_ValueError,
+                        "Cached object jar missing");
+	return -1;
+    }
+    Py_DECREF(jar);
+
+    object_again = PyDict_GetItem(self->data, key);
+    if (object_again) {
+	if (object_again != v) {
+	    PyErr_SetString(PyExc_ValueError,
+		    "A different object already has the same oid");
+	    return -1;
+	} else {
+	    /* re-register under the same oid - no work needed */
+	    return 0;
+	}
+    }
+
+    if (PyType_Check(v)) {
+	if (PyDict_SetItem(self->data, key, v) < 0)
+	    return -1;
+	self->klass_count++;
+	return 0;
+    } else {
+	PerCache *cache = ((cPersistentObject *)v)->cache;
+	if (cache) {
+	    if (cache != (PerCache *)self)
+		/* This object is already in a different cache. */
+		PyErr_SetString(PyExc_ValueError,
+				"Cache values may only be in one cache.");
+	    return -1;
+	}
+	/* else:
+
+	   This object is already one of ours, which is ok.  It
+	   would be very strange if someone was trying to register
+	   the same object under a different key.
+	*/
+    }
+
+    if (PyDict_SetItem(self->data, key, v) < 0)
+	return -1;
+    /* the dict should have a borrowed reference */
+    Py_DECREF(v);
+
+    p = (cPersistentObject *)v;
+    Py_INCREF(self);
+    p->cache = (PerCache *)self;
+    if (p->state >= 0) {
+	/* insert this non-ghost object into the ring just
+	   behind the home position. */
+	self->non_ghost_count++;
+	ring_add(&self->ring_home, &p->ring);
+	/* this list should have a new reference to the object */
+	Py_INCREF(v);
+    }
+    return 0;
+}
+
+static int
+cc_del_item(ccobject *self, PyObject *key)
+{
+    PyObject *v;
+    cPersistentObject *p;
+
+    /* unlink this item from the ring */
+    v = PyDict_GetItem(self->data, key);
+    if (v == NULL) {
+	PyErr_SetObject(PyExc_KeyError, key);
+	return -1;
+    }
+
+    if (PyType_Check(v)) {
+	self->klass_count--;
+    } else {
+	p = (cPersistentObject *)v;
+	if (p->state >= 0) {
+	    self->non_ghost_count--;
+	    ring_del(&p->ring);
+	    /* The DelItem below will account for the reference
+	       held by the list. */
+	} else {
+	    /* This is a ghost object, so we haven't kept a reference
+	       count on it.  For it to have stayed alive this long,
+	       someone else must be keeping a reference to
+	       it. Therefore we need to temporarily give it back a
+	       reference count before calling DelItem below */
+	    Py_INCREF(v);
+	}
+
+	Py_DECREF((PyObject *)p->cache);
+	p->cache = NULL;
+    }
+
+    if (PyDict_DelItem(self->data, key) < 0) {
+	PyErr_SetString(PyExc_RuntimeError,
+			"unexpectedly couldn't remove key in cc_ass_sub");
+	return -1;
+    }
+
+    return 0;
+}
+
+static int
+cc_ass_sub(ccobject *self, PyObject *key, PyObject *v)
+{
+    if (!PyString_Check(key)) {
+	PyErr_Format(PyExc_TypeError,
+                     "cPickleCache key must be a string, not a %s",
+		     key->ob_type->tp_name);
+	return -1;
+    }
+    if (v)
+	return cc_add_item(self, key, v);
+    else
+	return cc_del_item(self, key);
+}
+
+static PyMappingMethods cc_as_mapping = {
+  (inquiry)cc_length,		/*mp_length*/
+  (binaryfunc)cc_subscript,	/*mp_subscript*/
+  (objobjargproc)cc_ass_sub,	/*mp_ass_subscript*/
+};
+
+static PyObject *
+cc_cache_data(ccobject *self, void *context)
+{
+    return PyDict_Copy(self->data);
+}
+
+static PyGetSetDef cc_getsets[] = {
+    {"cache_data", (getter)cc_cache_data},
+    {NULL}
+};
+
+
+static PyMemberDef cc_members[] = {
+    {"cache_size", T_INT, offsetof(ccobject, cache_size)},
+    {"cache_size_bytes", T_LONG, offsetof(ccobject, cache_size_bytes)},
+    {"total_estimated_size", T_LONG, offsetof(ccobject, total_estimated_size), RO},
+    {"cache_drain_resistance", T_INT,
+     offsetof(ccobject, cache_drain_resistance)},
+    {"cache_non_ghost_count", T_INT, offsetof(ccobject, non_ghost_count), RO},
+    {"cache_klass_count", T_INT, offsetof(ccobject, klass_count), RO},
+    {NULL}
+};
+
+/* This module is compiled as a shared library.  Some compilers don't
+   allow addresses of Python objects defined in other libraries to be
+   used in static initializers here.  The DEFERRED_ADDRESS macro is
+   used to tag the slots where such addresses appear; the module init
+   function must fill in the tagged slots at runtime.  The argument is
+   for documentation -- the macro ignores it.
+*/
+#define DEFERRED_ADDRESS(ADDR) 0
+
+static PyTypeObject Cctype = {
+    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type))
+    0,					/* ob_size */
+    "persistent.PickleCache",		/* tp_name */
+    sizeof(ccobject),			/* tp_basicsize */
+    0,					/* tp_itemsize */
+    (destructor)cc_dealloc,		/* tp_dealloc */
+    0,					/* tp_print */
+    0,					/* tp_getattr */
+    0,					/* tp_setattr */
+    0,					/* tp_compare */
+    0,					/* tp_repr */
+    0,					/* tp_as_number */
+    0,					/* tp_as_sequence */
+    &cc_as_mapping,			/* tp_as_mapping */
+    0,					/* tp_hash */
+    0,					/* tp_call */
+    0,					/* tp_str */
+    0,					/* tp_getattro */
+    0,					/* tp_setattro */
+    0,					/* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    					/* tp_flags */
+    0,					/* tp_doc */
+    (traverseproc)cc_traverse,		/* tp_traverse */
+    (inquiry)cc_clear,			/* tp_clear */
+    0,					/* tp_richcompare */
+    0,					/* tp_weaklistoffset */
+    0,					/* tp_iter */
+    0,					/* tp_iternext */
+    cc_methods,				/* tp_methods */
+    cc_members,				/* tp_members */
+    cc_getsets,				/* tp_getset */
+    0,					/* tp_base */
+    0,					/* tp_dict */
+    0,					/* tp_descr_get */
+    0,					/* tp_descr_set */
+    0,					/* tp_dictoffset */
+    (initproc)cc_init,			/* tp_init */
+};
+
+void
+initcPickleCache(void)
+{
+    PyObject *m;
+
+    Cctype.ob_type = &PyType_Type;
+    Cctype.tp_new = &PyType_GenericNew;
+    if (PyType_Ready(&Cctype) < 0) {
+	return;
+    }
+
+    m = Py_InitModule3("cPickleCache", NULL, cPickleCache_doc_string);
+
+    capi = (cPersistenceCAPIstruct *)PyCObject_Import(
+	"persistent.cPersistence", "CAPI");
+    if (!capi)
+	return;
+    capi->percachedel = (percachedelfunc)cc_oid_unreferenced;
+
+    py__p_changed = PyString_InternFromString("_p_changed");
+    if (!py__p_changed)
+        return;
+    py__p_deactivate = PyString_InternFromString("_p_deactivate");
+    if (!py__p_deactivate)
+        return;
+    py__p_jar = PyString_InternFromString("_p_jar");
+    if (!py__p_jar)
+        return;
+    py__p_oid = PyString_InternFromString("_p_oid");
+    if (!py__p_oid)
+        return;
+
+    if (PyModule_AddStringConstant(m, "cache_variant", "stiff/c") < 0)
+	return;
+
+    /* This leaks a reference to Cctype, but it doesn't matter. */
+    if (PyModule_AddObject(m, "PickleCache", (PyObject *)&Cctype) < 0)
+	return;
+}

Deleted: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt
===================================================================
--- Sandbox/wichert/ZODB38-jarn/src/persistent/tests/persistent.txt	2008-09-22 16:32:13 UTC (rev 91359)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt	2008-09-22 16:53:27 UTC (rev 91361)
@@ -1,458 +0,0 @@
-Tests for `persistent.Persistent`
-=================================
-
-This document is an extended doc test that covers the basics of the
-Persistent base class.  The test expects a class named `P` to be
-provided in its globals.  The `P` class implements the `Persistent`
-interface.
-
-Test framework
---------------
-
-The class `P` needs to behave like `ExampleP`.  (Note that the code below
-is *not* part of the tests.)
-
-::
-
-  class ExampleP(Persistent):
-      def __init__(self):
-          self.x = 0
-      def inc(self):
-          self.x += 1
-
-The tests use stub data managers.  A data manager is responsible for
-loading and storing the state of a persistent object.  It's stored in
-the ``_p_jar`` attribute of a persistent object.
-
-  >>> class DM:
-  ...     def __init__(self):
-  ...         self.called = 0
-  ...     def register(self, ob):
-  ...         self.called += 1
-  ...     def setstate(self, ob):
-  ...         ob.__setstate__({'x': 42})
-
-  >>> class BrokenDM(DM):
-  ...     def register(self,ob):
-  ...         self.called += 1
-  ...         raise NotImplementedError
-  ...     def setstate(self,ob):
-  ...         raise NotImplementedError
-
-  >>> from persistent import Persistent
-
-
-Test Persistent without Data Manager
-------------------------------------
-
-First do some simple tests of a Persistent instance that does not have
-a data manager (``_p_jar``).
-
-  >>> p = P()
-  >>> p.x
-  0
-  >>> p._p_changed
-  False
-  >>> p._p_state
-  0
-  >>> p._p_jar
-  >>> p._p_oid
-
-Verify that modifications have no effect on ``_p_state`` or ``_p_changed``.
-
-  >>> p.inc()
-  >>> p.inc()
-  >>> p.x
-  2
-  >>> p._p_changed
-  False
-  >>> p._p_state
-  0
-
-Try all sorts of different ways to change the object's state.
-
-  >>> p._p_deactivate()
-  >>> p._p_state
-  0
-  >>> p._p_changed = True
-  >>> p._p_state
-  0
-  >>> del p._p_changed
-  >>> p._p_changed
-  False
-  >>> p._p_state
-  0
-  >>> p.x
-  2
-
-
-Test Persistent with Data Manager
----------------------------------
-
-Next try some tests of an object with a data manager.  The `DM` class is
-a simple testing stub.
-
-  >>> p = P()
-  >>> dm = DM()
-  >>> p._p_oid = "00000012"
-  >>> p._p_jar = dm
-  >>> p._p_changed
-  0
-  >>> dm.called
-  0
-
-Modifying the object marks it as changed and registers it with the data
-manager.  Subsequent modifications don't have additional side-effects.
-
-  >>> p.inc()
-  >>> p._p_changed
-  1
-  >>> dm.called
-  1
-  >>> p.inc()
-  >>> p._p_changed
-  1
-  >>> dm.called
-  1
-
-It's not possible to deactivate a modified object.
-
-  >>> p._p_deactivate()
-  >>> p._p_changed
-  1
-
-It is possible to invalidate it.  That's the key difference between
-deactivation and invalidation.
-
-  >>> p._p_invalidate()
-  >>> p._p_state
-  -1
-
-Now that the object is a ghost, any attempt to modify it will require that it
-be unghosted first.  The test data manager has the odd property that it sets
-the object's ``x`` attribute to ``42`` when it is unghosted.
-
-  >>> p.inc()
-  >>> p.x
-  43
-  >>> dm.called
-  2
-
-You can manually reset the changed field to ``False``, although it's not clear
-why you would want to do that.  The object changes to the ``UPTODATE`` state
-but retains its modifications.
-
-  >>> p._p_changed = False
-  >>> p._p_state
-  0
-  >>> p._p_changed
-  False
-  >>> p.x
-  43
-
-  >>> p.inc()
-  >>> p._p_changed
-  True
-  >>> dm.called
-  3
-
-``__getstate__()`` and ``__setstate__()``
------------------------------------------
-
-The next several tests cover the ``__getstate__()`` and ``__setstate__()``
-implementations.
-
-  >>> p = P()
-  >>> state = p.__getstate__()
-  >>> isinstance(state, dict)
-  True
-  >>> state['x']
-  0
-  >>> p._p_state
-  0
-
-Calling ``__setstate__()`` always leaves the object in the up-to-date
-state, although it's not obvious that this is the right behavior in every
-case.
-
-  >>> p.__setstate__({'x': 5})
-  >>> p._p_state
-  0
-
-Assigning to a volatile attribute has no effect on the object state.
-
-  >>> p._v_foo = 2
-  >>> p.__getstate__()
-  {'x': 5}
-  >>> p._p_state
-  0
-
-The ``_p_serial`` attribute is not affected by calling setstate.
-
-  >>> p._p_serial = "00000012"
-  >>> p.__setstate__(p.__getstate__())
-  >>> p._p_serial
-  '00000012'
-
-
-Change Ghost test
------------------
-
-If an object is a ghost, setting its ``_p_changed`` to ``True`` (or any
-other true value) activates (unghostifies) the object.  This behavior is
-new in ZODB 3.6; before then, an attempt to do ``ghost._p_changed = True``
-was ignored.
-
-  >>> p = P()
-  >>> p._p_jar = DM()
-  >>> p._p_oid = 1
-  >>> p._p_deactivate()
-  >>> p._p_changed # None
-  >>> p._p_state # ghost state
-  -1
-  >>> p._p_changed = True
-  >>> p._p_changed
-  1
-  >>> p._p_state # changed state
-  1
-  >>> p.x
-  42
-
-
-Activate, deactivate, and invalidate
-------------------------------------
-
-Some of these tests are redundant, but are included to make sure there
-are explicit and simple tests of ``_p_activate()``, ``_p_deactivate()``, and
-``_p_invalidate()``.
-
-  >>> p = P()
-  >>> p._p_oid = 1
-  >>> p._p_jar = DM()
-  >>> p._p_deactivate()
-  >>> p._p_state
-  -1
-  >>> p._p_activate()
-  >>> p._p_state
-  0
-  >>> p.x
-  42
-  >>> p.inc()
-  >>> p.x
-  43
-  >>> p._p_state
-  1
-  >>> p._p_invalidate()
-  >>> p._p_state
-  -1
-  >>> p.x
-  42
-
-
-Test failures
--------------
-
-The following tests cover various error cases.
-
-When an object is modified, it registers with its data manager.  If that
-registration fails, the exception is propagated and the object stays in the
-up-to-date state.  It shouldn't change to the modified state, because it won't
-be saved when the transaction commits.
-
-  >>> p = P()
-  >>> p._p_oid = 1
-  >>> p._p_jar = BrokenDM()
-  >>> p._p_state
-  0
-  >>> p._p_jar.called
-  0
-  >>> p._p_changed = 1
-  Traceback (most recent call last):
-    ...
-  NotImplementedError
-  >>> p._p_jar.called
-  1
-  >>> p._p_state
-  0
-
-Make sure that exceptions that occur inside the data manager's ``setstate()``
-method propagate out to the caller.
-
-  >>> p = P()
-  >>> p._p_oid = 1
-  >>> p._p_jar = BrokenDM()
-  >>> p._p_deactivate()
-  >>> p._p_state
-  -1
-  >>> p._p_activate()
-  Traceback (most recent call last):
-    ...
-  NotImplementedError
-  >>> p._p_state
-  -1
-
-
-Special test to cover layout of ``__dict__``
---------------------------------------------
-
-We once had a bug in the `Persistent` class that calculated an incorrect
-offset for the ``__dict__`` attribute.  It assigned ``__dict__`` and
-``_p_jar`` to the same location in memory.  This is a simple test to make sure
-they have different locations.
-
-  >>> p = P()
-  >>> p.inc()
-  >>> p.inc()
-  >>> 'x' in p.__dict__
-  True
-  >>> p._p_jar
-
-
-Inheritance and metaclasses
----------------------------
-
-Simple tests to make sure it's possible to inherit from the `Persistent` base
-class multiple times.  There used to be metaclasses involved in `Persistent`
-that probably made this a more interesting test.
-
-  >>> class A(Persistent):
-  ...     pass
-  >>> class B(Persistent):
-  ...     pass
-  >>> class C(A, B):
-  ...     pass
-  >>> class D(object):
-  ...     pass
-  >>> class E(D, B):
-  ...     pass
-  >>> a = A()
-  >>> b = B()
-  >>> c = C()
-  >>> d = D()
-  >>> e = E()
-
-Also make sure that it's possible to define `Persistent` classes that have a
-custom metaclass.
-
-  >>> class alternateMeta(type):
-  ...     type
-  >>> class alternate(object):
-  ...     __metaclass__ = alternateMeta
-  >>> class mixedMeta(alternateMeta, type):
-  ...     pass
-  >>> class mixed(alternate, Persistent):
-  ...     pass
-  >>> class mixed(Persistent, alternate):
-  ...     pass
-
-
-Basic type structure
---------------------
-
-  >>> Persistent.__dictoffset__
-  0
-  >>> Persistent.__weakrefoffset__
-  0
-  >>> Persistent.__basicsize__ > object.__basicsize__
-  True
-  >>> P.__dictoffset__ > 0
-  True
-  >>> P.__weakrefoffset__ > 0
-  True
-  >>> P.__dictoffset__ < P.__weakrefoffset__
-  True
-  >>> P.__basicsize__ > Persistent.__basicsize__
-  True
-
-
-Slots
------
-
-These are some simple tests of classes that have a ``__slots__``
-attribute.  Some of the classes should have slots, others shouldn't.
-
-  >>> class noDict(object):
-  ...     __slots__ = ['foo']
-  >>> class p_noDict(Persistent):
-  ...     __slots__ = ['foo']
-  >>> class p_shouldHaveDict(p_noDict):
-  ...     pass
-
-  >>> p_noDict.__dictoffset__
-  0
-  >>> x = p_noDict()
-  >>> x.foo = 1
-  >>> x.foo
-  1
-  >>> x.bar = 1
-  Traceback (most recent call last):
-    ...
-  AttributeError: 'p_noDict' object has no attribute 'bar'
-  >>> x._v_bar = 1
-  Traceback (most recent call last):
-    ...
-  AttributeError: 'p_noDict' object has no attribute '_v_bar'
-  >>> x.__dict__
-  Traceback (most recent call last):
-    ...
-  AttributeError: 'p_noDict' object has no attribute '__dict__'
-
-The various ``_p_`` attributes are unaffected by slots.
-
-  >>> x._p_oid
-  >>> x._p_jar
-  >>> x._p_state
-  0
-
-If the most-derived class does not specify ``__slots__``, its instances
-get a ``__dict__``.
-
-  >>> p_shouldHaveDict.__dictoffset__ > 0
-  True
-  >>> x = p_shouldHaveDict()
-  >>> isinstance(x.__dict__, dict)
-  True
-
-
-Pickling
---------
-
-There's actually a substantial effort involved in making subclasses of
-`Persistent` work with plain-old pickle.  The ZODB serialization layer never
-calls pickle on an object; it pickles the object's class description and its
-state as two separate pickles.
-
-  >>> import pickle
-  >>> p = P()
-  >>> p.inc()
-  >>> p2 = pickle.loads(pickle.dumps(p))
-  >>> p2.__class__ is P
-  True
-  >>> p2.x == p.x
-  True
-
-We should also test that pickle works with custom ``__getstate__()`` and
-``__setstate__()`` methods, and perhaps even ``__reduce__()``.  The
-problem is that pickling depends on finding the
-class in a particular module, and classes defined here won't appear in any
-module.  We could require each user of the tests to define a base class, but
-that might be tedious.
-
-
-Interfaces
-----------
-
-Some versions of Zope and ZODB have the `zope.interface` package
-available.  If it is, then `Persistent` will be associated with several
-interfaces.  It's hard to write a doctest that runs only if
-`zope.interface` is available, so this test looks a little unusual.  One
-problem is that the assert statements won't do anything if Python is run
-with `-O`.
-
-  >>> try:
-  ...     import zope.interface
-  ... except ImportError:
-  ...     pass
-  ... else:
-  ...     from persistent.interfaces import IPersistent
-  ...     assert IPersistent.implementedBy(Persistent)
-  ...     p = Persistent()
-  ...     assert IPersistent.providedBy(p)
-  ...     assert IPersistent.implementedBy(P)
-  ...     p = P()
-  ...     assert IPersistent.providedBy(p)

Copied: Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt (from rev 91360, Sandbox/wichert/ZODB38-jarn/src/persistent/tests/persistent.txt)
===================================================================
--- Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt	                        (rev 0)
+++ Sandbox/wichert/tags/ZODB-3.8.1b8.jarn.1/src/persistent/tests/persistent.txt	2008-09-22 16:53:27 UTC (rev 91361)
@@ -0,0 +1,476 @@
+Tests for `persistent.Persistent`
+=================================
+
+This document is an extended doctest that covers the basics of the
+Persistent base class.  The test expects a class named `P` to be
+provided in its globals.  The `P` class implements the `Persistent`
+interface.
+
+Test framework
+--------------
+
+The class `P` needs to behave like `ExampleP`.  (Note that the code below
+is *not* part of the tests.)
+
+::
+
+  class ExampleP(Persistent):
+      def __init__(self):
+          self.x = 0
+      def inc(self):
+          self.x += 1
+
+The tests use stub data managers.  A data manager is responsible for
+loading and storing the state of a persistent object.  It's stored in
+the ``_p_jar`` attribute of a persistent object.
+
+  >>> class DM:
+  ...     def __init__(self):
+  ...         self.called = 0
+  ...     def register(self, ob):
+  ...         self.called += 1
+  ...     def setstate(self, ob):
+  ...         ob.__setstate__({'x': 42})
+
+  >>> class BrokenDM(DM):
+  ...     def register(self,ob):
+  ...         self.called += 1
+  ...         raise NotImplementedError
+  ...     def setstate(self,ob):
+  ...         raise NotImplementedError
+
+  >>> from persistent import Persistent
+
+
+Test Persistent without Data Manager
+------------------------------------
+
+First do some simple tests of a Persistent instance that does not have
+a data manager (``_p_jar``).
+
+  >>> p = P()
+  >>> p.x
+  0
+  >>> p._p_changed
+  False
+  >>> p._p_state
+  0
+  >>> p._p_jar
+  >>> p._p_oid
+
+Verify that modifications have no effect on ``_p_state`` or ``_p_changed``.
+
+  >>> p.inc()
+  >>> p.inc()
+  >>> p.x
+  2
+  >>> p._p_changed
+  False
+  >>> p._p_state
+  0
+
+Try all sorts of different ways to change the object's state.
+
+  >>> p._p_deactivate()
+  >>> p._p_state
+  0
+  >>> p._p_changed = True
+  >>> p._p_state
+  0
+  >>> del p._p_changed
+  >>> p._p_changed
+  False
+  >>> p._p_state
+  0
+  >>> p.x
+  2
+
+We can store a size estimate in ``_p_estimated_size``; its default is 0.
+A cache associated with the data manager can use the estimate to
+implement its replacement strategy or to enforce size bounds.  Of course,
+the estimate must not be negative.
+
+  >>> p._p_estimated_size
+  0
+  >>> p._p_estimated_size = 1000
+  >>> p._p_estimated_size
+  1000
+  >>> p._p_estimated_size = -1
+  Traceback (most recent call last):
+    ...
+  ValueError: _p_estimated_size must not be negative
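+
+A cache could, for example, ghostify unmodified objects until the sum of
+the estimates fits within a byte limit.  The function below is a purely
+illustrative sketch of that idea, not the actual ZODB cache code.  (Note
+that ``_p_deactivate()`` is a no-op here, because ``p`` has no data
+manager.)
+
+  >>> def evict_until_under(objects, byte_limit):
+  ...     # Total the estimates, then deactivate unmodified objects
+  ...     # until the running total fits under the limit.
+  ...     total = sum(ob._p_estimated_size for ob in objects)
+  ...     for ob in objects:
+  ...         if total <= byte_limit:
+  ...             break
+  ...         if not ob._p_changed:
+  ...             total -= ob._p_estimated_size
+  ...             ob._p_deactivate()
+  ...     return total
+
+  >>> evict_until_under([p], 100)
+  0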
+
+
+Test Persistent with Data Manager
+---------------------------------
+
+Next try some tests of an object with a data manager.  The `DM` class is
+a simple testing stub.
+
+  >>> p = P()
+  >>> dm = DM()
+  >>> p._p_oid = "00000012"
+  >>> p._p_jar = dm
+  >>> p._p_changed
+  0
+  >>> dm.called
+  0
+
+Modifying the object marks it as changed and registers it with the data
+manager.  Subsequent modifications don't have additional side-effects.
+
+  >>> p.inc()
+  >>> p._p_changed
+  1
+  >>> dm.called
+  1
+  >>> p.inc()
+  >>> p._p_changed
+  1
+  >>> dm.called
+  1
+
+It's not possible to deactivate a modified object.
+
+  >>> p._p_deactivate()
+  >>> p._p_changed
+  1
+
+It is possible to invalidate it.  That's the key difference between
+deactivation and invalidation: deactivation is only a hint, ignored for a
+modified object, while invalidation unconditionally discards the object's
+state and turns it into a ghost.
+
+  >>> p._p_invalidate()
+  >>> p._p_state
+  -1
+
+Now that the object is a ghost, any attempt to modify it will require that it
+be unghosted first.  The test data manager has the odd property that it sets
+the object's ``x`` attribute to ``42`` when it is unghosted.
+
+  >>> p.inc()
+  >>> p.x
+  43
+  >>> dm.called
+  2
+
+You can manually reset the changed field to ``False``, although it's not clear
+why you would want to do that.  The object changes to the ``UPTODATE`` state
+but retains its modifications.
+
+  >>> p._p_changed = False
+  >>> p._p_state
+  0
+  >>> p._p_changed
+  False
+  >>> p.x
+  43
+
+  >>> p.inc()
+  >>> p._p_changed
+  True
+  >>> dm.called
+  3
+
+``__getstate__()`` and ``__setstate__()``
+-----------------------------------------
+
+The next several tests cover the ``__getstate__()`` and ``__setstate__()``
+implementations.
+
+  >>> p = P()
+  >>> state = p.__getstate__()
+  >>> isinstance(state, dict)
+  True
+  >>> state['x']
+  0
+  >>> p._p_state
+  0
+
+Calling ``__setstate__()`` always leaves the object in the up-to-date
+state, although it's not obvious that this is the right behavior in every
+case.
+
+  >>> p.__setstate__({'x': 5})
+  >>> p._p_state
+  0
+
+Assigning to a volatile attribute has no effect on the object state.
+
+  >>> p._v_foo = 2
+  >>> p.__getstate__()
+  {'x': 5}
+  >>> p._p_state
+  0
+
+The ``_p_serial`` attribute is not affected by calling setstate.
+
+  >>> p._p_serial = "00000012"
+  >>> p.__setstate__(p.__getstate__())
+  >>> p._p_serial
+  '00000012'
+
+
+Change Ghost test
+-----------------
+
+If an object is a ghost, setting its ``_p_changed`` to ``True`` (or any
+other true value) activates (unghostifies) the object.  This behavior is
+new in ZODB 3.6; before then, an attempt to do ``ghost._p_changed = True``
+was ignored.
+
+  >>> p = P()
+  >>> p._p_jar = DM()
+  >>> p._p_oid = 1
+  >>> p._p_deactivate()
+  >>> p._p_changed # None
+  >>> p._p_state # ghost state
+  -1
+  >>> p._p_changed = True
+  >>> p._p_changed
+  1
+  >>> p._p_state # changed state
+  1
+  >>> p.x
+  42
+
+
+Activate, deactivate, and invalidate
+------------------------------------
+
+Some of these tests are redundant, but are included to make sure there
+are explicit and simple tests of ``_p_activate()``, ``_p_deactivate()``, and
+``_p_invalidate()``.
+
+  >>> p = P()
+  >>> p._p_oid = 1
+  >>> p._p_jar = DM()
+  >>> p._p_deactivate()
+  >>> p._p_state
+  -1
+  >>> p._p_activate()
+  >>> p._p_state
+  0
+  >>> p.x
+  42
+  >>> p.inc()
+  >>> p.x
+  43
+  >>> p._p_state
+  1
+  >>> p._p_invalidate()
+  >>> p._p_state
+  -1
+  >>> p.x
+  42
+
+
+Test failures
+-------------
+
+The following tests cover various error cases.
+
+When an object is modified, it registers with its data manager.  If that
+registration fails, the exception is propagated and the object stays in the
+up-to-date state.  It shouldn't change to the modified state, because it won't
+be saved when the transaction commits.
+
+  >>> p = P()
+  >>> p._p_oid = 1
+  >>> p._p_jar = BrokenDM()
+  >>> p._p_state
+  0
+  >>> p._p_jar.called
+  0
+  >>> p._p_changed = 1
+  Traceback (most recent call last):
+    ...
+  NotImplementedError
+  >>> p._p_jar.called
+  1
+  >>> p._p_state
+  0
+
+Make sure that exceptions that occur inside the data manager's ``setstate()``
+method propagate out to the caller.
+
+  >>> p = P()
+  >>> p._p_oid = 1
+  >>> p._p_jar = BrokenDM()
+  >>> p._p_deactivate()
+  >>> p._p_state
+  -1
+  >>> p._p_activate()
+  Traceback (most recent call last):
+    ...
+  NotImplementedError
+  >>> p._p_state
+  -1
+
+
+Special test to cover layout of ``__dict__``
+--------------------------------------------
+
+We once had a bug in the `Persistent` class that calculated an incorrect
+offset for the ``__dict__`` attribute.  It assigned ``__dict__`` and
+``_p_jar`` to the same location in memory.  This is a simple test to make sure
+they have different locations.
+
+  >>> p = P()
+  >>> p.inc()
+  >>> p.inc()
+  >>> 'x' in p.__dict__
+  True
+  >>> p._p_jar
+
+
+Inheritance and metaclasses
+---------------------------
+
+Simple tests to make sure it's possible to inherit from the `Persistent` base
+class multiple times.  There used to be metaclasses involved in `Persistent`
+that probably made this a more interesting test.
+
+  >>> class A(Persistent):
+  ...     pass
+  >>> class B(Persistent):
+  ...     pass
+  >>> class C(A, B):
+  ...     pass
+  >>> class D(object):
+  ...     pass
+  >>> class E(D, B):
+  ...     pass
+  >>> a = A()
+  >>> b = B()
+  >>> c = C()
+  >>> d = D()
+  >>> e = E()
+
+Also make sure that it's possible to define `Persistent` classes that have a
+custom metaclass.
+
+  >>> class alternateMeta(type):
+  ...     type
+  >>> class alternate(object):
+  ...     __metaclass__ = alternateMeta
+  >>> class mixedMeta(alternateMeta, type):
+  ...     pass
+  >>> class mixed(alternate, Persistent):
+  ...     pass
+  >>> class mixed(Persistent, alternate):
+  ...     pass
+
+
+Basic type structure
+--------------------
+
+  >>> Persistent.__dictoffset__
+  0
+  >>> Persistent.__weakrefoffset__
+  0
+  >>> Persistent.__basicsize__ > object.__basicsize__
+  True
+  >>> P.__dictoffset__ > 0
+  True
+  >>> P.__weakrefoffset__ > 0
+  True
+  >>> P.__dictoffset__ < P.__weakrefoffset__
+  True
+  >>> P.__basicsize__ > Persistent.__basicsize__
+  True
+
+
+Slots
+-----
+
+These are some simple tests of classes that have a ``__slots__``
+attribute.  Some of the classes should have slots, others shouldn't.
+
+  >>> class noDict(object):
+  ...     __slots__ = ['foo']
+  >>> class p_noDict(Persistent):
+  ...     __slots__ = ['foo']
+  >>> class p_shouldHaveDict(p_noDict):
+  ...     pass
+
+  >>> p_noDict.__dictoffset__
+  0
+  >>> x = p_noDict()
+  >>> x.foo = 1
+  >>> x.foo
+  1
+  >>> x.bar = 1
+  Traceback (most recent call last):
+    ...
+  AttributeError: 'p_noDict' object has no attribute 'bar'
+  >>> x._v_bar = 1
+  Traceback (most recent call last):
+    ...
+  AttributeError: 'p_noDict' object has no attribute '_v_bar'
+  >>> x.__dict__
+  Traceback (most recent call last):
+    ...
+  AttributeError: 'p_noDict' object has no attribute '__dict__'
+
+The various ``_p_`` attributes are unaffected by slots.
+
+  >>> x._p_oid
+  >>> x._p_jar
+  >>> x._p_state
+  0
+
+If the most-derived class does not specify ``__slots__``, its instances
+get a ``__dict__``.
+
+  >>> p_shouldHaveDict.__dictoffset__ > 0
+  True
+  >>> x = p_shouldHaveDict()
+  >>> isinstance(x.__dict__, dict)
+  True
+
+
+Pickling
+--------
+
+There's actually a substantial effort involved in making subclasses of
+`Persistent` work with plain-old pickle.  The ZODB serialization layer never
+calls pickle on an object; it pickles the object's class description and its
+state as two separate pickles.
+
+  >>> import pickle
+  >>> p = P()
+  >>> p.inc()
+  >>> p2 = pickle.loads(pickle.dumps(p))
+  >>> p2.__class__ is P
+  True
+  >>> p2.x == p.x
+  True
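+
+The "two separate pickles" idea can be sketched roughly as follows.  This
+is only an illustration of the concept, not the actual ZODB serialization
+code:
+
+  >>> class_pickle = pickle.dumps(p.__class__)       # class description
+  >>> state_pickle = pickle.dumps(p.__getstate__())  # object state
+  >>> cls = pickle.loads(class_pickle)
+  >>> p3 = cls.__new__(cls)    # instantiate without running __init__
+  >>> p3.__setstate__(pickle.loads(state_pickle))
+  >>> p3.__class__ is P and p3.x == p.x
+  True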
+
+We should also test that pickle works with custom ``__getstate__()`` and
+``__setstate__()`` methods, and perhaps even ``__reduce__()``.  The
+problem is that pickling depends on finding the
+class in a particular module, and classes defined here won't appear in any
+module.  We could require each user of the tests to define a base class, but
+that might be tedious.
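+
+If such a base class were available, a test subclass might look something
+like this sketch.  (``PWithState`` is hypothetical, and the code below is
+*not* part of the tests; for pickling to work it would have to live in a
+real, importable module.)
+
+::
+
+  class PWithState(P):
+      def __getstate__(self):
+          # Wrap the default state so a round trip is detectable.
+          return ('wrapped', Persistent.__getstate__(self))
+      def __setstate__(self, state):
+          marker, real_state = state
+          assert marker == 'wrapped'
+          Persistent.__setstate__(self, real_state)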
+
+
+Interfaces
+----------
+
+Some versions of Zope and ZODB have the `zope.interface` package
+available.  If it is, then `Persistent` will be associated with several
+interfaces.  It's hard to write a doctest that runs only if
+`zope.interface` is available, so this test looks a little unusual.  One
+problem is that the assert statements won't do anything if Python is run
+with `-O`.
+
+  >>> try:
+  ...     import zope.interface
+  ... except ImportError:
+  ...     pass
+  ... else:
+  ...     from persistent.interfaces import IPersistent
+  ...     assert IPersistent.implementedBy(Persistent)
+  ...     p = Persistent()
+  ...     assert IPersistent.providedBy(p)
+  ...     assert IPersistent.implementedBy(P)
+  ...     p = P()
+  ...     assert IPersistent.providedBy(p)


