[Zodb-checkins] CVS: ZODB3/ZODB - serialize.py:1.2.8.1 utils.py:1.18.2.1 fstools.py:1.1.62.1 fsrecover.py:1.13.2.1 fspack.py:1.13.2.1 fsIndex.py:1.4.54.1 coptimizations.c:1.23.50.1 conversionhack.py:1.4.114.1 __init__.py:1.24.2.1 Transaction.py:1.53.2.1 POSException.py:1.21.2.1 Mount.py:1.19.36.1 MappingStorage.py:1.9.40.6 ExportImport.py:1.17.2.1 DemoStorage.py:1.21.2.7 DB.py:1.55.2.2 Connection.py:1.100.2.9 ConflictResolution.py:1.19.2.1 BaseStorage.py:1.36.2.6 cPersistence.h:NONE cPersistence.c:NONE PersistentMapping.py:NONE PersistentList.py:NONE

Jeremy Hylton jeremy at zope.com
Tue Dec 23 14:07:06 EST 2003


Update of /cvs-repository/ZODB3/ZODB
In directory cvs.zope.org:/tmp/cvs-serv26665/ZODB

Modified Files:
      Tag: ZODB3-mvcc-2-branch
	utils.py fstools.py fsrecover.py fspack.py fsIndex.py 
	coptimizations.c conversionhack.py __init__.py Transaction.py 
	POSException.py Mount.py MappingStorage.py ExportImport.py 
	DemoStorage.py DB.py Connection.py ConflictResolution.py 
	BaseStorage.py 
Added Files:
      Tag: ZODB3-mvcc-2-branch
	serialize.py 
Removed Files:
      Tag: ZODB3-mvcc-2-branch
	cPersistence.h cPersistence.c PersistentMapping.py 
	PersistentList.py 
Log Message:
Merge the head to the mvcc branch.

This merge should be the final preparation for merging the branch to
the trunk.


=== Added File ZODB3/ZODB/serialize.py ===
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for ZODB object serialization.

ZODB serializes objects using a custom format based on Python pickles.
When an object is unserialized, it can be loaded as either a ghost or
a real object.  A ghost is a persistent object of the appropriate type
but without any state.  The first time a ghost is accessed, the
persistence machinery traps access and loads the actual state.  A
ghost allows many persistent objects to be loaded while minimizing the
memory consumption of referenced but otherwise unused objects.

Pickle format
-------------

ZODB stores serialized objects using a custom format based on pickle.
Each serialized object has two parts: the class metadata and the
object state.  The class description must provide enough information
to call the class's ``__new__`` and create an empty object.  Once the
object exists as a ghost, its state is passed to ``__setstate__``.

The class metadata can be represented in two different ways, in order
to provide backwards compatibility with many earlier versions of ZODB.
The class metadata is always a two-tuple.  The first element may also
be a tuple, containing two string elements: name of a module and the
name of a class.  The second element of the class metadata tuple is a
tuple of arguments to pass to the class's ``__new__``.

Persistent references
---------------------

A persistent reference is a pair containing an oid and class metadata.
When one persistent object pickle refers to another persistent object,
the database uses a persistent reference.  The format allows a
significant optimization, because ghosts can be created directly from
persistent references.  If the reference was just an oid, a database
access would be required to determine the class of the ghost.

Because the persistent reference includes the class, it is not
possible to change the class of a persistent object.  If a transaction
changed the class of an object, a new record with new class metadata
would be written but all the old references would still include the
old class.

"""

import cPickle
import cStringIO

from ZODB.coptimizations import new_persistent_id

_marker = object()

def myhasattr(obj, attr):
    """Return True if obj has the attribute, False if it does not.

    Unlike the builtin hasattr(), only AttributeError is treated as
    "attribute missing"; any other exception raised by an attribute
    descriptor propagates to the caller.
    """
    try:
        getattr(obj, attr)
    except AttributeError:
        return False
    return True

def getClassMetadata(obj):
    """Return the pickleable class metadata for obj as (class-info, args).

    class-info is either the class object itself (ZClass case) or a
    (module-name, class-name) pair.  args is the tuple needed to
    recreate an empty instance, or None when the class defines no
    __getinitargs__ hook.
    """
    klass = obj.__class__
    if issubclass(klass, type):
        # Handle ZClasses: the "instance" is itself a class, so capture
        # enough (name, bases, dict) to reconstruct it.  The _p_jar slot
        # must not be serialized with the class dict.
        d = obj.__dict__.copy()
        del d["_p_jar"]
        args = obj.__name__, obj.__bases__, d
        return klass, args
    else:
        # Look the hook up on the class (not the instance) so that a
        # ghost is not activated just to check for it.
        getinitargs = getattr(klass, "__getinitargs__", None)
        if getinitargs is None:
            args = None
        else:
            # Bug fix: the hook must be invoked on the instance.  The
            # previous code called the class attribute with no
            # arguments, which raised TypeError whenever the hook
            # actually existed.
            args = getinitargs(obj)
        mod = getattr(klass, "__module__", None)
        if mod is None:
            return klass, args
        else:
            return (mod, klass.__name__), args

class BaseObjectWriter:
    """Serializes objects for storage in the database.

    Produces ZODB-format pickles: one pickle for the class metadata
    followed by one pickle for the object state, sharing a single
    buffer and pickler.  The persistent_id hook installed on the
    pickler records persistent sub-objects on an internal stack as
    they are encountered.

    NOTE(review): pre-2.3 Picklers do not participate in garbage
    collection, so a long-lived writer holds its pickler's memo;
    callers should drop the writer when done with it.
    """

    def __init__(self, jar=None):
        # If a jar is supplied it must be able to hand out new oids
        # for the objects discovered during pickling.
        if jar is not None:
            assert myhasattr(jar, "new_oid")
        self._jar = jar
        self._stack = []
        self._file = cStringIO.StringIO()
        self._p = cPickle.Pickler(self._file, 1)
        self._p.persistent_id = new_persistent_id(jar, self._stack)

    def serialize(self, obj):
        """Return the serialized record (metadata + state) for obj."""
        return self._dump(getClassMetadata(obj), obj.__getstate__())

    def _dump(self, classmeta, state):
        # Reuse the single cStringIO buffer: rewind, write the two
        # pickles, then truncate any leftover bytes from a previous,
        # longer record before reading the result back.
        buf = self._file
        pickler = self._p
        buf.seek(0)
        pickler.clear_memo()
        pickler.dump(classmeta)
        pickler.dump(state)
        buf.truncate()
        return buf.getvalue()

class ObjectWriter(BaseObjectWriter):
    """A writer bound to a single object's jar.

    Iterating over the writer yields persistent objects accumulated on
    the shared stack while pickling, starting with the object itself
    (see NewObjectIterator).
    """

    def __init__(self, obj):
        # The stack is seeded with obj; the persistent_id hook set up
        # by BaseObjectWriter shares this same stack and can add to it
        # during pickling.
        BaseObjectWriter.__init__(self, obj._p_jar)
        self._stack.append(obj)

    def __iter__(self):
        return NewObjectIterator(self._stack)

class NewObjectIterator:

    # The pickler is used as a forward iterator when the connection
    # is looking for new objects to pickle.

    def __init__(self, stack):
        self._stack = stack

    def __iter__(self):
        return self

    def next(self):
        if self._stack:
            elt = self._stack.pop()
            return elt
        else:
            raise StopIteration

class BaseObjectReader:
    """Deserialize ZODB records (class-metadata pickle + state pickle).

    Subclasses supply the policy pieces: how to resolve a persistent
    reference (_persistent_load) and how to look up a class from its
    module and name (_get_class).
    """

    def _persistent_load(self, oid):
        """Resolve a persistent reference; subclasses must override."""
        raise NotImplementedError

    def _get_class(self, module, name):
        """Return the class for (module, name); subclasses must override."""
        raise NotImplementedError

    def _get_unpickler(self, pickle):
        # Fresh unpickler per record, wired to this reader's
        # persistent-reference policy.
        unpickler = cPickle.Unpickler(cStringIO.StringIO(pickle))
        unpickler.persistent_load = self._persistent_load
        return unpickler

    def _new_object(self, klass, args):
        # Classes with __getinitargs__ (or explicit args) are built by
        # calling the constructor; everything else gets an empty
        # instance via __new__ so no user __init__ runs.
        if args or myhasattr(klass, "__getinitargs__"):
            obj = klass(*args)
            # Old-style instances will be given their state via
            # __setstate__; discard whatever __init__ put in __dict__.
            if not isinstance(klass, type):
                obj.__dict__.clear()
        else:
            obj = klass.__new__(klass)
        return obj

    def getClassName(self, pickle):
        """Return the dotted class name stored in a record."""
        klass, newargs = self._get_unpickler(pickle).load()
        if isinstance(klass, tuple):
            return "%s.%s" % klass
        return klass.__name__

    def getGhost(self, pickle):
        """Return a stateless (ghost) instance of the record's class."""
        klass, args = self._get_unpickler(pickle).load()
        if isinstance(klass, tuple):
            klass = self._get_class(*klass)
        return self._new_object(klass, args)

    def getState(self, pickle):
        """Return only the state portion of a record."""
        unpickler = self._get_unpickler(pickle)
        unpickler.load()  # discard the class-metadata pickle
        return unpickler.load()

    def setGhostState(self, obj, pickle):
        """Fill in a ghost's state from a record."""
        obj.__setstate__(self.getState(pickle))

    def getObject(self, pickle):
        """Return a fully loaded instance built from a record."""
        unpickler = self._get_unpickler(pickle)
        klass, args = unpickler.load()
        obj = self._new_object(klass, args)
        obj.__setstate__(unpickler.load())
        return obj

class ExternalReference(object):
    """Marker object standing in for a persistent object not loaded here."""
    pass

class SimpleObjectReader(BaseObjectReader):
    """Can be used to inspect a single object pickle.

    It returns an ExternalReference() object for other persistent
    objects.  It can't instantiate the object.
    """

    # Single shared marker returned for every persistent reference.
    ext_ref = ExternalReference()

    def _persistent_load(self, oid):
        # Stand in for the referenced object without touching any
        # database; the oid itself is discarded.
        return self.ext_ref

    def _get_class(self, module, name):
        # No class resolution: this reader is for inspection only
        # (e.g. getClassName/getState), not for building instances.
        return None

class ConnectionObjectReader(BaseObjectReader):
    """Reader wired to a Connection, its pickle cache, and a class factory.

    Persistent references are resolved against the cache first, then
    the connection; "quick instance references" (oid, class-metadata)
    are turned directly into ghosts without a database round trip.
    """

    def __init__(self, conn, cache, factory):
        # conn: the Connection used as _p_jar and as fallback loader.
        # cache: the pickle cache (cache-like, not a plain dict).
        # factory: callable (conn, module, name) -> class.
        self._conn = conn
        self._cache = cache
        self._factory = factory

    def _get_class(self, module, name):
        return self._factory(self._conn, module, name)

    def _persistent_load(self, oid):
        if isinstance(oid, tuple):
            # Quick instance reference.  We know all we need to know
            # to create the instance w/o hitting the db, so go for it!
            oid, klass_info = oid
            obj = self._cache.get(oid, None) # XXX it's not a dict
            if obj is not None:
                return obj

            klass = self._get_class(*klass_info)
            # XXX Why doesn't this have args?
            obj = self._new_object(klass, None)
            # XXX This doesn't address the last fallback that used to
            # exist:
##                    # Eek, we couldn't get the class. Hm.  Maybe there's
##                    # more current data in the object's actual record!
##                    return self._conn[oid]

            # XXX should be done by connection
            obj._p_oid = oid
            obj._p_jar = self._conn
            # When an object is created, it is put in the UPTODATE
            # state.  We must explicitly deactivate it to turn it into
            # a ghost.
            obj._p_changed = None

            self._cache[oid] = obj
            return obj

        # Plain oid: prefer the cached object, otherwise let the
        # connection load it.
        obj = self._cache.get(oid)
        if obj is not None:
            return obj
        return self._conn[oid]


=== ZODB3/ZODB/utils.py 1.18 => 1.18.2.1 ===
--- ZODB3/ZODB/utils.py:1.18	Thu Oct  2 14:17:19 2003
+++ ZODB3/ZODB/utils.py	Tue Dec 23 14:05:52 2003
@@ -13,7 +13,8 @@
 ##############################################################################
 
 import sys
-import TimeStamp, time
+import time
+from persistent.TimeStamp import TimeStamp
 
 from struct import pack, unpack
 from types import StringType
@@ -80,7 +81,7 @@
 
 
 def newTimeStamp(old=None,
-                 TimeStamp=TimeStamp.TimeStamp,
+                 TimeStamp=TimeStamp,
                  time=time.time, gmtime=time.gmtime):
     t = time()
     ts = TimeStamp(gmtime(t)[:5]+(t%60,))


=== ZODB3/ZODB/fstools.py 1.1 => 1.1.62.1 ===
--- ZODB3/ZODB/fstools.py:1.1	Mon Nov 18 15:45:48 2002
+++ ZODB3/ZODB/fstools.py	Tue Dec 23 14:05:52 2003
@@ -24,7 +24,7 @@
 from ZODB.FileStorage import TRANS_HDR, DATA_HDR, TRANS_HDR_LEN, \
      DATA_HDR_LEN, DATA_VERSION_HDR_LEN
 from ZODB.utils import p64, u64
-from ZODB.TimeStamp import TimeStamp
+from persistent.TimeStamp import TimeStamp
 
 class TxnHeader:
     """Object representing a transaction record header.


=== ZODB3/ZODB/fsrecover.py 1.13 => 1.13.2.1 ===
--- ZODB3/ZODB/fsrecover.py:1.13	Thu Oct  2 14:17:19 2003
+++ ZODB3/ZODB/fsrecover.py	Tue Dec 23 14:05:52 2003
@@ -83,7 +83,7 @@
 import getopt, ZODB.FileStorage, struct, time
 from struct import unpack
 from ZODB.utils import t32, p64, u64
-from ZODB.TimeStamp import TimeStamp
+from persistent.TimeStamp import TimeStamp
 from cPickle import loads
 from ZODB.FileStorage import RecordIterator
 
@@ -323,8 +323,8 @@
                         l = len(r.data)
 
                     print "%7d %s %s" % (u64(r.oid), l, r.version)
-                s = ofs.restore(r.oid, r.serial, r.data, r.version,
-                                r.data_txn, txn)
+                ofs.restore(r.oid, r.serial, r.data, r.version, r.data_txn,
+                            txn)
                 nrec += 1
         except (KeyboardInterrupt, SystemExit):
             raise


=== ZODB3/ZODB/fspack.py 1.13 => 1.13.2.1 ===
--- ZODB3/ZODB/fspack.py:1.13	Thu Oct  2 20:33:06 2003
+++ ZODB3/ZODB/fspack.py	Tue Dec 23 14:05:52 2003
@@ -177,7 +177,6 @@
                       "txnlen (%d) < headerlen(%d)", th.tlen, th.headerlen())
 
     def checkData(self, th, tpos, dh, pos):
-        tend = tpos + th.tlen
         if dh.tloc != tpos:
             self.fail(pos, "data record does not point to transaction header"
                       ": %d != %d", dh.tloc, tpos)
@@ -345,7 +344,6 @@
         if not prev:
             return None
 
-        pnv = None
         h = self._read_data_header(prev, oid)
         # If the previous record is for a version, it must have
         # a valid pnv.
@@ -712,7 +710,6 @@
         return pos
 
     def copyToPacktime(self):
-        offset = 0L  # the amount of space freed by packing
         pos = self._metadata_size
         new_pos = pos
 
@@ -778,7 +775,6 @@
                 s = th.asString()
                 new_tpos = self._tfile.tell()
                 self._tfile.write(s)
-                new_pos = new_tpos + len(s)
                 copy = 1
 
             if h.plen:
@@ -790,7 +786,6 @@
                 data = self.fetchBackpointer(h.oid, h.back)
 
             self.writePackedDataRecord(h, data, new_tpos)
-            new_pos = self._tfile.tell()
 
         return new_tpos, pos
 


=== ZODB3/ZODB/fsIndex.py 1.4 => 1.4.54.1 ===
--- ZODB3/ZODB/fsIndex.py:1.4	Tue Dec  3 13:45:16 2002
+++ ZODB3/ZODB/fsIndex.py	Tue Dec 23 14:05:53 2003
@@ -8,12 +8,11 @@
 # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
 # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
+# FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-"""Implement an OID to File-position (long integer) mapping
-"""
-#
+"""Implement an OID to File-position (long integer) mapping."""
+
 # To save space, we do two things:
 #
 #     1. We split the keys (OIDS) into 6-byte prefixes and 2-byte suffixes.
@@ -29,29 +28,26 @@
 # suffix to 6-byte data. This should reduce the overall memory usage to
 # 8-16 bytes per OID.
 #
+# Since the mapping from suffix to data contains at most 256 entries,
+# we use a BTree bucket instead of a full BTree to store the results.
+#
 # We use p64 to convert integers to 8-byte strings and lop off the two
 # high-order bytes when saving. On loading data, we add the leading
-# bytes back before using U64 to convert the data back to (long)
+# bytes back before using u64 to convert the data back to (long)
 # integers.
 
-from BTrees._fsBTree import fsBTree as _fsBTree
-
+from __future__ import generators
 import struct
 
-# convert between numbers and six-byte strings
+from BTrees._fsBTree import fsBucket
 
-_t32 = 1L<< 32
+# convert between numbers and six-byte strings
 
 def num2str(n):
-    h, l = divmod(long(n), _t32)
-    return struct.pack(">HI", h, l)
+    return struct.pack(">Q", n)[2:]
 
 def str2num(s):
-    h, l = struct.unpack(">HI", s)
-    if h:
-        return (long(h) << 32) + l
-    else:
-        return l
+    return struct.unpack(">Q", "\000\000" + s)[0]
 
 class fsIndex:
 
@@ -75,7 +71,7 @@
         treekey = key[:6]
         tree = self._data.get(treekey)
         if tree is None:
-            tree = _fsBTree()
+            tree = fsBucket()
             self._data[treekey] = tree
         tree[key[6:]] = value
 
@@ -96,14 +92,19 @@
     def __contains__(self, key):
         tree = self._data.get(key[:6])
         if tree is None:
-            return 0
+            return False
         v = tree.get(key[6:], None)
         if v is None:
-            return 0
-        return 1
+            return False
+        return True
 
     def clear(self):
         self._data.clear()
+
+    def __iter__(self):
+        for prefix, tree in self._data.items():
+            for suffix in tree:
+                yield prefix + suffix
 
     def keys(self):
         r = []


=== ZODB3/ZODB/coptimizations.c 1.23 => 1.23.50.1 ===
--- ZODB3/ZODB/coptimizations.c:1.23	Fri Dec 13 16:56:05 2002
+++ ZODB3/ZODB/coptimizations.c	Tue Dec 23 14:05:53 2003
@@ -16,24 +16,16 @@
 "\n"
 "$Id$\n";
 
-#include "Python.h"
-#define DONT_USE_CPERSISTENCECAPI
 #include "cPersistence.h"
 
-static void PyVar_Assign(PyObject **v, PyObject *e) { Py_XDECREF(*v); *v=e;}
-#define ASSIGN(V,E) PyVar_Assign(&(V),(E))
-#define UNLESS(E) if(!(E))
-#define UNLESS_ASSIGN(V,E) ASSIGN(V,E); UNLESS(V)
-#define OBJECT(O) ((PyObject*)(O))
-
 static PyObject *py__p_oid, *py__p_jar, *py___getinitargs__, *py___module__;
 static PyObject *py_new_oid, *py___class__, *py___name__;
 
 static PyObject *InvalidObjectReference;
 
 typedef struct {
-  PyObject_HEAD
-  PyObject *jar, *stack, *new_oid;
+    PyObject_HEAD
+    PyObject *jar, *stack, *new_oid;
 } persistent_id;
 
 static PyTypeObject persistent_idType;
@@ -74,25 +66,15 @@
 {
     PyObject *class = NULL;
 
-    if (!PyExtensionClass_Check(object)) {
-	if (PyExtensionInstance_Check(object)) {
-	    class = PyObject_GetAttr(object, py___class__);
-	    if (!class) {
-		PyErr_Clear();
-		return 0;
-	    }
-	    /* The __class__ must be an extension class. */
-	    if (!(((PyExtensionClass*)class)->class_flags 
-		  & PERSISTENT_TYPE_FLAG)) {
-		Py_DECREF(class);
-		return 0;
-	    }
-	}
-	else
-	    /* Most objects will exit via this path.  They are neither
-	       extension classes nor instances of them.
-	    */
+    if (!PyType_Check(object)) {
+	if (!PER_TypeCheck(object)) 
 	    return 0;
+
+	class = PyObject_GetAttr(object, py___class__);
+	if (!class) {
+	    PyErr_Clear();
+	    return 0;
+	}
     }
     *out_class = class;
     return 1;
@@ -191,9 +173,41 @@
 	PyErr_Clear();
 	goto return_none;
     }
-
     if (oid != Py_None) {
-	PyObject *jar = PyObject_GetAttr(object, py__p_jar);
+	PyObject *jar;
+
+	if (!PyString_Check(oid)) {
+	    /* If the object is a class, then asking for _p_oid or
+	       _p_jar will return a descriptor.  There is no API to
+	       ask whether something is a descriptor; the best you
+	       can do is call anything with an __get__ a descriptor.
+
+	       The getattr check is potentially expensive so do the
+	       cheap PyString_Check() first, assuming that most oids
+	       that aren't None are real oids.  ZODB always uses
+	       strings, although some other user of Persistent could
+	       use something else.
+	    */
+	    static PyObject *__get__;
+	    PyObject *descr;
+	    if (!__get__) {
+		__get__ = PyString_InternFromString("__get__");
+		if (!__get__)
+		    goto err;
+	    }
+	    descr = PyObject_GetAttr(oid, __get__);
+	    if (descr) {
+		Py_DECREF(descr);
+		goto return_none;
+	    }
+	    /* Otherwise it's not a descriptor and it's just some
+	       weird value.  Maybe we'll get an error later.
+	    */
+
+	    /* XXX should check that this was an AttributeError */
+	    PyErr_Clear();
+	}
+	jar = PyObject_GetAttr(object, py__p_jar);
 	if (!jar)
 	    PyErr_Clear();
 	else {
@@ -217,8 +231,7 @@
 	    goto err;
     }
 
-    if (PyExtensionClass_Check(object)
-	|| PyObject_HasAttr(klass, py___getinitargs__))
+    if (PyType_Check(object) || PyObject_HasAttr(klass, py___getinitargs__))
 	goto return_oid;
 
     t2 = get_class_tuple(klass, oid);
@@ -257,29 +270,28 @@
 
 static PyTypeObject persistent_idType = {
     PyObject_HEAD_INIT(NULL)
-    0,				/*ob_size*/
+    0,					/*ob_size*/
     "persistent_id",			/*tp_name*/
     sizeof(persistent_id),		/*tp_basicsize*/
-    0,				/*tp_itemsize*/
-    /* methods */
+    0,					/*tp_itemsize*/
     (destructor)persistent_id_dealloc,	/*tp_dealloc*/
-    (printfunc)0,	/*tp_print*/
-    (getattrfunc)0,		/*obsolete tp_getattr*/
-    (setattrfunc)0,		/*obsolete tp_setattr*/
-    (cmpfunc)0,	/*tp_compare*/
-    (reprfunc)0,		/*tp_repr*/
-    0,		/*tp_as_number*/
-    0,		/*tp_as_sequence*/
-    0,		/*tp_as_mapping*/
-    (hashfunc)0,		/*tp_hash*/
+    0,					/*tp_print*/
+    0,					/*tp_getattr*/
+    0,					/*tp_setattr*/
+    0,					/*tp_compare*/
+    0,					/*tp_repr*/
+    0,					/*tp_as_number*/
+    0,					/*tp_as_sequence*/
+    0,					/*tp_as_mapping*/
+    0,					/*tp_hash*/
     (ternaryfunc)persistent_id_call,	/*tp_call*/
-    (reprfunc)0,		/*tp_str*/
-    (getattrofunc)0,	/*tp_getattro*/
-    (setattrofunc)0,	/*tp_setattro*/
-    
-    /* Space for future expansion */
-    0L,0L,
+    0,					/*tp_str*/
+    0,					/*tp_getattro*/
+    0,					/*tp_setattro*/
+    0,					/* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT,			/* tp_flags */
     "C implementation of the persistent_id function defined in Connection.py"
+    					/* tp_doc */
 };
 
 /* End of code for persistent_id objects */
@@ -297,7 +309,7 @@
 void
 initcoptimizations(void)
 {
-    PyObject *m, *d;
+    PyObject *m;
 
 #define make_string(S) if (! (py_ ## S=PyString_FromString(#S))) return
     make_string(_p_oid);
@@ -309,20 +321,23 @@
     make_string(new_oid);
 			
     /* Get InvalidObjectReference error */
-    UNLESS (m=PyString_FromString("ZODB.POSException")) return;
-    ASSIGN(m, PyImport_Import(m));
-    UNLESS (m) return;
-    ASSIGN(m, PyObject_GetAttrString(m, "InvalidObjectReference"));
-    UNLESS (m) return;
-    InvalidObjectReference=m;
+    m = PyImport_ImportModule("ZODB.POSException");
+    if (!m)
+	return;
+    InvalidObjectReference = PyObject_GetAttrString(m, 
+						    "InvalidObjectReference");
+    Py_DECREF(m);
+    if (!InvalidObjectReference)
+	return;
 
-    if (!ExtensionClassImported) 
+    cPersistenceCAPI = PyCObject_Import("persistent.cPersistence", "CAPI");
+    if (!cPersistenceCAPI)
 	return;
 
     m = Py_InitModule3("coptimizations", Module_Level__methods,
 		       coptimizations_doc_string);
-    d = PyModule_GetDict(m);
 
     persistent_idType.ob_type = &PyType_Type;
-    PyDict_SetItemString(d,"persistent_idType", OBJECT(&persistent_idType));
+    Py_INCREF((PyObject *)&persistent_idType);
+    PyModule_AddObject(m, "persistent_idType", (PyObject *)&persistent_idType);
 }


=== ZODB3/ZODB/conversionhack.py 1.4 => 1.4.114.1 ===
--- ZODB3/ZODB/conversionhack.py:1.4	Wed Aug 14 18:07:09 2002
+++ ZODB3/ZODB/conversionhack.py	Tue Dec 23 14:05:53 2003
@@ -12,7 +12,7 @@
 #
 ##############################################################################
 
-import PersistentMapping
+import persistent.mapping
 
 class fixer:
     def __of__(self, parent):
@@ -27,7 +27,7 @@
 hack=hack()
 
 def __basicnew__():
-    r=PersistentMapping.PersistentMapping()
+    r=persistent.mapping.PersistentMapping()
     r.__setstate__=fixer
     return r
 


=== ZODB3/ZODB/__init__.py 1.24 => 1.24.2.1 ===
--- ZODB3/ZODB/__init__.py:1.24	Thu Oct  2 14:17:19 2003
+++ ZODB3/ZODB/__init__.py	Tue Dec 23 14:05:53 2003
@@ -12,36 +12,14 @@
 #
 ##############################################################################
 
-__version__ = '3.2c1'
+__version__ = "3.3a1"
 
 import sys
-import cPersistence, Persistence
-from zLOG import register_subsystem
-
-# This is lame. Don't look. :(
-sys.modules['cPersistence'] = cPersistence
-
-Persistent = cPersistence.Persistent
-
-# Install Persistent and PersistentMapping in Persistence
-if not hasattr(Persistence, 'Persistent'):
-    Persistence.Persistent = Persistent
-    Persistent.__module__ = 'Persistence'
-    Persistence.Overridable = cPersistence.Overridable
-    Persistence.Overridable.__module__ = 'Persistence'
-    if not hasattr(Persistence, 'PersistentMapping'):
-        import PersistentMapping
-        sys.modules['PersistentMapping'] = PersistentMapping
-        sys.modules['BoboPOS'] = sys.modules['ZODB']
-        sys.modules['BoboPOS.PersistentMapping'] = PersistentMapping
-        PersistentMapping = PersistentMapping.PersistentMapping
-        from PersistentMapping import PersistentMapping
-        Persistence.PersistentMapping = PersistentMapping
-        PersistentMapping.__module__ = 'Persistence'
-        del PersistentMapping
-
-del cPersistence
 
+from persistent import TimeStamp
 from DB import DB
-
 import Transaction
+
+# Backward compat for old imports. I don't think TimeStamp should
+# really be in persistent anyway
+sys.modules['ZODB.TimeStamp'] = sys.modules['persistent.TimeStamp']


=== ZODB3/ZODB/Transaction.py 1.53 => 1.53.2.1 ===
--- ZODB3/ZODB/Transaction.py:1.53	Thu Oct  2 18:48:07 2003
+++ ZODB3/ZODB/Transaction.py	Tue Dec 23 14:05:53 2003
@@ -251,24 +251,28 @@
                 else:
                     self._finish_many(jars)
             except:
-                # Ugh, we got an got an error during commit, so we
-                # have to clean up.  First save the original exception
-                # in case the cleanup process causes another
-                # exception.
-                error = sys.exc_info()
-                try:
-                    self._commit_error(objects, ncommitted, jars, subjars)
-                except:
-                    LOG('ZODB', ERROR,
-                        "A storage error occured during transaction "
-                        "abort.  This shouldn't happen.",
-                        error=sys.exc_info())
-                raise error[0], error[1], error[2]
+                self._cleanup(objects, ncommitted, jars, subjars)
         finally:
             del objects[:] # clear registered
             if not subtransaction and self._id is not None:
                 free_transaction()
 
+    def _cleanup(self, objects, ncommitted, jars, subjars):
+        # Ugh, we got an got an error during commit, so we
+        # have to clean up.  First save the original exception
+        # in case the cleanup process causes another
+        # exception.
+        error = sys.exc_info()
+        try:
+            self._commit_error(objects, ncommitted, jars, subjars)
+        except:
+            LOG("ZODB", ERROR,
+                "A storage error occured during transaction "
+                "abort.  This shouldn't happen.",
+                error=sys.exc_info())
+        raise error[0], error[1], error[2]
+        
+
     def _get_jars(self, objects, subtransaction):
         # Returns a list of jars for this transaction.
 
@@ -426,7 +430,7 @@
 
     def note(self, text):
         if self.description:
-            self.description = "%s\n\n%s" % (self.description, test.strip())
+            self.description = "%s\n\n%s" % (self.description, text.strip())
         else:
             self.description = text.strip()
 


=== ZODB3/ZODB/POSException.py 1.21 => 1.21.2.1 ===
--- ZODB3/ZODB/POSException.py:1.21	Thu Oct  2 14:17:19 2003
+++ ZODB3/ZODB/POSException.py	Tue Dec 23 14:05:53 2003
@@ -51,13 +51,17 @@
         related to conflict.  The first is the revision of object that
         is in conflict, the second is the revision of that the current
         transaction read when it started.
+      data : string
+        The database record that failed to commit, used to put the
+        class name in the error message.
 
     The caller should pass either object or oid as a keyword argument,
     but not both of them.  If object is passed, it should be a
     persistent object with an _p_oid attribute.
     """
 
-    def __init__(self, message=None, object=None, oid=None, serials=None):
+    def __init__(self, message=None, object=None, oid=None, serials=None,
+                 data=None):
         if message is None:
             self.message = "database conflict error"
         else:
@@ -75,6 +79,14 @@
             assert self.oid is None
             self.oid = oid
 
+        if data is not None:
+            # avoid circular import chain
+            from ZODB.serialize import SimpleObjectReader
+            self.class_name = SimpleObjectReader().getClassName(data)
+##        else:
+##            if message != "data read conflict error":
+##                raise RuntimeError
+
         self.serials = serials
 
     def __str__(self):
@@ -119,13 +131,66 @@
                                serials=serials)
 
 class BTreesConflictError(ConflictError):
-    """A special subclass for BTrees conflict errors.
+    """A special subclass for BTrees conflict errors."""
 
-    These return an undocumented four-tuple.
-    """
-    def __init__(self, *btree_args):
-        ConflictError.__init__(self, message="BTrees conflict error")
-        self.btree = btree_args
+    msgs = [# 0; i2 or i3 bucket split; positions are all -1
+            'Conflicting bucket split',
+
+            # 1; keys the same, but i2 and i3 values differ, and both values
+            # differ from i1's value
+            'Conflicting changes',
+
+            # 2; i1's value changed in i2, but key+value deleted in i3
+            'Conflicting delete and change',
+
+            # 3; i1's value changed in i3, but key+value deleted in i2
+            'Conflicting delete and change',
+
+            # 4; i1 and i2 both added the same key, or both deleted the
+            # same key
+            'Conflicting inserts or deletes',
+
+            # 5;  i2 and i3 both deleted the same key
+            'Conflicting deletes',
+
+            # 6; i2 and i3 both added the same key
+            'Conflicting inserts',
+
+            # 7; i2 and i3 both deleted the same key, or i2 changed the value
+            # associated with a key and i3 deleted that key
+            'Conflicting deletes, or delete and change',
+
+            # 8; i2 and i3 both deleted the same key, or i3 changed the value
+            # associated with a key and i2 deleted that key
+            'Conflicting deletes, or delete and change',
+
+            # 9; i2 and i3 both deleted the same key
+            'Conflicting deletes',
+
+            # 10; i2 and i3 deleted all the keys, and didn't insert any,
+            # leaving an empty bucket; conflict resolution doesn't have
+            # enough info to unlink an empty bucket from its containing
+            # BTree correctly
+            'Empty bucket from deleting all keys',
+
+            # 11; conflicting changes in an internal BTree node
+            'Conflicting changes in an internal BTree node',
+            ]
+
+    def __init__(self, p1, p2, p3, reason):
+        self.p1 = p1
+        self.p2 = p2
+        self.p3 = p3
+        self.reason = reason
+
+    def __repr__(self):
+        return "BTreesConflictError(%d, %d, %d, %d)" % (self.p1,
+                                                        self.p2,
+                                                        self.p3,
+                                                        self.reason)
+    def __str__(self):
+        return "BTrees conflict error at %d/%d/%d: %s" % (
+            self.p1, self.p2, self.p3, self.msgs[self.reason])
 
 class DanglingReferenceError(TransactionError):
     """An object has a persistent reference to a missing object.


=== ZODB3/ZODB/Mount.py 1.19 => 1.19.36.1 ===
--- ZODB3/ZODB/Mount.py:1.19	Thu Feb  6 15:31:17 2003
+++ ZODB3/ZODB/Mount.py	Tue Dec 23 14:05:53 2003
@@ -16,9 +16,9 @@
 $Id$"""
 __version__='$Revision$'[11:-2]
 
-import thread, Persistence, Acquisition
+import thread, persistent, Acquisition
 from Acquisition import aq_base
-import ExtensionClass, string, time, sys
+import string, time, sys
 from POSException import MountedStorageError
 from zLOG import LOG, ERROR, INFO, WARNING
 
@@ -44,7 +44,7 @@
         return parent_db._classFactory(parent_conn, module, name)
 
 
-class MountPoint(Persistence.Persistent, Acquisition.Implicit):
+class MountPoint(persistent.Persistent, Acquisition.Implicit):
     '''The base class for a Zope object which, when traversed,
     accesses a different database.
     '''
@@ -88,7 +88,7 @@
         '''Gets the database object, usually by creating a Storage object
         and returning ZODB.DB(storage).
         '''
-        raise 'NotImplemented'
+        raise NotImplementedError
 
     def _getDB(self):
         '''Creates or opens a DB object.


=== ZODB3/ZODB/MappingStorage.py 1.9.40.5 => 1.9.40.6 ===
--- ZODB3/ZODB/MappingStorage.py:1.9.40.5	Tue Dec  2 02:10:31 2003
+++ ZODB3/ZODB/MappingStorage.py	Tue Dec 23 14:05:53 2003
@@ -26,7 +26,7 @@
 from ZODB import utils
 from ZODB import BaseStorage
 from ZODB import POSException
-from ZODB.TimeStamp import TimeStamp
+from persistent.TimeStamp import TimeStamp
 
 
 class MappingStorage(BaseStorage.BaseStorage):
@@ -81,7 +81,9 @@
                 old = self._index[oid]
                 oserial = old[:8]
                 if serial != oserial:
-                    raise POSException.ConflictError(serials=(oserial, serial))
+                    raise POSException.ConflictError(oid=oid,
+                                                     serials=(oserial, serial),
+                                                     data=data)
 
             self._tindex.append((oid, self._tid + data))
         finally:


=== ZODB3/ZODB/ExportImport.py 1.17 => 1.17.2.1 ===
--- ZODB3/ZODB/ExportImport.py:1.17	Thu Oct  2 19:58:01 2003
+++ ZODB3/ZODB/ExportImport.py	Tue Dec 23 14:05:53 2003
@@ -60,15 +60,11 @@
     def importFile(self, file, clue='', customImporters=None):
         # This is tricky, because we need to work in a transaction!
 
-        if type(file) is StringType:
-            file_name=file
-            file=open(file,'rb')
-        else:
-            try: file_name=file.name
-            except: file_name='(unknown)'
-        read=file.read
+        if isinstance(file, StringType):
+            file = open(file,'rb')
+        read = file.read
 
-        magic=read(4)
+        magic = read(4)
 
         if magic != 'ZEXP':
             if customImporters and customImporters.has_key(magic):
@@ -77,7 +73,8 @@
             raise POSException.ExportError, 'Invalid export header'
 
         t = self.getTransaction()
-        if clue: t.note(clue)
+        if clue:
+            t.note(clue)
 
         return_oid_list = []
         self.onCommitAction('_importDuringCommit', file, return_oid_list)
@@ -151,7 +148,6 @@
             pickler.dump(unpickler.load())
             pickler.dump(unpickler.load())
             p=newp.getvalue()
-            plen=len(p)
 
             store(oid, None, p, version, transaction)
 


=== ZODB3/ZODB/DemoStorage.py 1.21.2.6 => 1.21.2.7 ===
--- ZODB3/ZODB/DemoStorage.py:1.21.2.6	Tue Dec  2 02:10:31 2003
+++ ZODB3/ZODB/DemoStorage.py	Tue Dec 23 14:05:53 2003
@@ -83,7 +83,7 @@
 
 import base64, time, string
 from ZODB import POSException, BaseStorage, utils
-from TimeStamp import TimeStamp
+from persistent.TimeStamp import TimeStamp
 from cPickle import loads
 from BTrees import OOBTree
 
@@ -165,12 +165,13 @@
 
         self._lock_acquire()
         try:
-            v=self._vindex.get(src, None)
-            if v is None: return
+            v = self._vindex.get(src)
+            if v is None:
+                return
 
             newserial = self._tid
-            tindex=self._tindex
-            oids=[]
+            tindex = self._tindex
+            oids = []
             for r in v.values():
                 oid, pre, vdata, p, tid = r
                 assert vdata is not None
@@ -181,10 +182,10 @@
                     new_vdata = None
                 tindex.append([oid, r, new_vdata, p, self._tid])
 
-
             return self._tid, oids
 
-        finally: self._lock_release()
+        finally:
+            self._lock_release()
 
     def loadEx(self, oid, version):
         self._lock_acquire()
@@ -257,7 +258,8 @@
                     nv=old
 
                 if serial != tid:
-                    raise POSException.ConflictError(serials=(tid, serial))
+                    raise POSException.ConflictError(
+                        oid=oid, serials=(tid, serial), data=data)
 
             r = [oid, old, version and (version, nv) or None, data, self._tid]
             self._tindex.append(r)


=== ZODB3/ZODB/DB.py 1.55.2.1 => 1.55.2.2 ===
--- ZODB3/ZODB/DB.py:1.55.2.1	Tue Oct  7 01:10:32 2003
+++ ZODB3/ZODB/DB.py	Tue Dec 23 14:05:53 2003
@@ -84,8 +84,8 @@
             storage.load('\0\0\0\0\0\0\0\0','')
         except KeyError:
             # Create the database's root in the storage if it doesn't exist
-            import PersistentMapping
-            root = PersistentMapping.PersistentMapping()
+            from persistent.mapping import PersistentMapping
+            root = PersistentMapping()
             # Manually create a pickle for the root to put in the storage.
             # The pickle must be in the special ZODB format.
             file = cStringIO.StringIO()
@@ -267,9 +267,6 @@
 
     def close(self):
         self._storage.close()
-        for x, allocated in self._pools[1]:
-            for c in allocated:
-                c._breakcr()
 
     def commitVersion(self, source, destination='', transaction=None):
         if transaction is None:
@@ -277,7 +274,7 @@
         transaction.register(CommitVersion(self, source, destination))
 
     def exportFile(self, oid, file=None):
-        raise 'Not yet implemented'
+        raise NotImplementedError
 
     def getCacheDeactivateAfter(self):
         return self._cache_deactivate_after
@@ -301,7 +298,7 @@
         return self._version_pool_size
 
     def importFile(self, file):
-        raise 'Not yet implemented'
+        raise NotImplementedError
 
     def invalidate(self, tid, oids, connection=None, version=''):
         """Invalidate references to a given oid.


=== ZODB3/ZODB/Connection.py 1.100.2.8 => 1.100.2.9 ===
--- ZODB3/ZODB/Connection.py:1.100.2.8	Fri Dec 19 11:10:05 2003
+++ ZODB3/ZODB/Connection.py	Tue Dec 23 14:05:53 2003
@@ -15,17 +15,6 @@
 
 $Id$"""
 
-from cPickleCache import PickleCache
-from POSException import ConflictError, ReadConflictError, TransactionError
-from ExtensionClass import Base
-import ExportImport, TmpStore
-from coptimizations import new_persistent_id
-from ConflictResolution import ResolvedSerial
-from Transaction import Transaction, get_transaction
-from ZODB.utils import oid_repr, U64
-
-from cPickle import Unpickler, Pickler
-from cStringIO import StringIO
 import logging
 import sys
 import threading
@@ -38,20 +27,36 @@
     # builtin hasattr() swallows exceptions
     return getattr(obj, attr, _marker) is not _marker
 
-global_code_timestamp = 0
-
-def updateCodeTimestamp():
-    """Called after changes are made to persistence-based classes.
+from persistent import PickleCache
+from zLOG import LOG, ERROR, BLATHER, WARNING
 
-    Causes all connection caches to be re-created as the connections are
-    reopened.
+from ZODB.ConflictResolution import ResolvedSerial
+from ZODB.coptimizations import new_persistent_id
+from ZODB.ExportImport import ExportImport
+from ZODB.POSException \
+     import ConflictError, ReadConflictError, TransactionError
+from ZODB.TmpStore import TmpStore
+from ZODB.Transaction import Transaction, get_transaction
+from ZODB.utils import oid_repr, z64
+from ZODB.serialize \
+     import ObjectWriter, getClassMetadata, ConnectionObjectReader
+
+global_reset_counter = 0
+
+def resetCaches():
+    """Causes all connection caches to be reset as connections are reopened.
+    
+    Zope's refresh feature uses this.  When you reload Python modules,
+    instances of classes continue to use the old class definitions.
+    To use the new code immediately, the refresh feature asks ZODB to
+    clear caches by calling resetCaches().  When the instances are
+    loaded by subsequent connections, they will use the new class
+    definitions.
     """
-    global global_code_timestamp
-    global_code_timestamp = time()
-
-ExtensionKlass = Base.__class__
+    global global_reset_counter
+    global_reset_counter += 1
 
-class Connection(ExportImport.ExportImport, object):
+class Connection(ExportImport, object):
     """Object managers for individual object space.
 
     An object space is a version of collection of objects.  In a
@@ -65,9 +70,6 @@
     _code_timestamp = 0
     _transaction = None
 
-    # Experimental. Other connections can register to be closed
-    # when we close by putting something here.
-
     def __init__(self, version='', cache_size=400,
                  cache_deactivate_after=60, mvcc=True):
         """Create a new Connection"""
@@ -86,7 +88,7 @@
             self._cache.cache_drain_resistance = 100
         self._incrgc = self.cacheGC = cache.incrgc
         self._committed = []
-        self._code_timestamp = global_code_timestamp
+        self._reset_counter = global_reset_counter
         self._load_count = 0   # Number of objects unghosted
         self._store_count = 0  # Number of objects stored
 
@@ -148,86 +150,21 @@
             ver = ''
         return '<Connection at %08x%s>' % (id(self), ver)
 
-    def _breakcr(self):
-        # Persistent objects and the cache don't participate in GC.
-        # Explicitly remove references from the connection to its
-        # cache and to the root object, because they refer back to the
-        # connection.
-        if self._cache is not None:
-            self._cache.clear()
-        self._incrgc = None
-        self.cacheGC = None
-
     def __getitem__(self, oid):
         obj = self._cache.get(oid, None)
         if obj is not None:
             return obj
 
-        __traceback_info__ = (oid)
         p, serial = self._storage.load(oid, self._version)
-        __traceback_info__ = (oid, p)
-        file=StringIO(p)
-        unpickler=Unpickler(file)
-        unpickler.persistent_load=self._persistent_load
-
-        object = unpickler.load()
-
-        klass, args = object
-
-        if isinstance(klass, tuple):
-            module, name = klass
-            klass = self._db._classFactory(self, module, name)
-
-        if (args is None or
-            not args and not hasattr(klass,'__getinitargs__')):
-            object=klass.__basicnew__()
-        else:
-            object = klass(*args)
-            if klass is not ExtensionKlass:
-                object.__dict__.clear()
-
-        object._p_oid=oid
-        object._p_jar=self
-        object._p_changed=None
-        object._p_serial=serial
-
-        self._cache[oid] = object
-        return object
-
-    def _persistent_load(self, oid):
-        __traceback_info__=oid
-
-        if isinstance(oid, tuple):
-            # Quick instance reference.  We know all we need to know
-            # to create the instance wo hitting the db, so go for it!
-            oid, klass = oid
-            obj = self._cache.get(oid, None)
-            if obj is not None:
-                return obj
-
-            if isinstance(klass, tuple):
-                module, name = klass
-                try:
-                    klass=self._db._classFactory(self, module, name)
-                except:
-                    # Eek, we couldn't get the class. Hm.
-                    # Maybe their's more current data in the
-                    # object's actual record!
-                    return self[oid]
-
-            object=klass.__basicnew__()
-            object._p_oid=oid
-            object._p_jar=self
-            object._p_changed=None
-
-            self._cache[oid] = object
+        obj = self._reader.getGhost(p)
 
-            return object
+        obj._p_oid = oid
+        obj._p_jar = self
+        obj._p_changed = None
+        obj._p_serial = serial
 
-        obj = self._cache.get(oid, None)
-        if obj is not None:
-            return obj
-        return self[oid]
+        self._cache[oid] = obj
+        return obj
 
     def sortKey(self):
         # XXX will raise an exception if the DB hasn't been set
@@ -242,28 +179,30 @@
 
         Any objects modified since the last transaction are invalidated.
         """
-        self._db=odb
-        self._storage=s=odb._storage
+        self._db = odb
+        self._storage = odb._storage
         self._sortKey = odb._storage.sortKey
-        self.new_oid=s.new_oid
-        if self._code_timestamp != global_code_timestamp:
+        self.new_oid = odb._storage.new_oid
+        if self._reset_counter != global_reset_counter:
             # New code is in place.  Start a new cache.
             self._resetCache()
         else:
             self._flush_invalidations()
-        self._opened=time()
+        self._reader = ConnectionObjectReader(self, self._cache,
+                                              self._db._classFactory)
+        self._opened = time()
 
         return self
 
     def _resetCache(self):
-        '''
-        Creates a new cache, discarding the old.
-        '''
-        self._code_timestamp = global_code_timestamp
+        """Creates a new cache, discarding the old.
+
+        See the docstring for the resetCaches() function.
+        """
+        self._reset_counter = global_reset_counter
         self._invalidated.clear()
-        orig_cache = self._cache
-        orig_cache.clear()
-        self._cache = cache = PickleCache(self, orig_cache.cache_size)
+        cache_size = self._cache.cache_size
+        self._cache = cache = PickleCache(self, cache_size)
         self._incrgc = self.cacheGC = cache.incrgc
 
     def abort(self, object, transaction):
@@ -350,100 +289,31 @@
             # Nothing to do
             return
 
-        stack = [object]
-
-        # Create a special persistent_id that passes T and the subobject
-        # stack along:
-        #
-        # def persistent_id(object,
-        #                   self=self,
-        #                   stackup=stackup, new_oid=self.new_oid):
-        #     if (not hasattr(object, '_p_oid') or
-        #         type(object) is ClassType): return None
-        #
-        #     oid=object._p_oid
-        #
-        #     if oid is None or object._p_jar is not self:
-        #         oid = self.new_oid()
-        #         object._p_jar=self
-        #         object._p_oid=oid
-        #         stackup(object)
-        #
-        #     klass=object.__class__
-        #
-        #     if klass is ExtensionKlass: return oid
-        #
-        #     if hasattr(klass, '__getinitargs__'): return oid
-        #
-        #     module=getattr(klass,'__module__','')
-        #     if module: klass=module, klass.__name__
-        #
-        #     return oid, klass
-
-        file=StringIO()
-        seek=file.seek
-        pickler=Pickler(file,1)
-        pickler.persistent_id=new_persistent_id(self, stack)
-        dbstore=self._storage.store
-        file=file.getvalue
-        cache=self._cache
-        get=cache.get
-        dump=pickler.dump
-        clear_memo=pickler.clear_memo
-
-
-        version=self._version
-
-        while stack:
-            object=stack[-1]
-            del stack[-1]
-            oid=object._p_oid
-            serial=getattr(object, '_p_serial', '\0\0\0\0\0\0\0\0')
-            if serial == '\0\0\0\0\0\0\0\0':
+        w = ObjectWriter(object)
+        for obj in w:
+            oid = obj._p_oid
+            serial = getattr(obj, '_p_serial', z64)
+            if serial == z64:
                 # new object
                 self._creating.append(oid)
             else:
                 #XXX We should never get here
                 if invalid(oid) and not hasattr(object, '_p_resolveConflict'):
-                    raise ConflictError(object=object)
+                    raise ConflictError(object=obj)
                 self._modified.append(oid)
 
-            klass = object.__class__
-
-            if klass is ExtensionKlass:
-                # Yee Ha!
-                dict={}
-                dict.update(object.__dict__)
-                del dict['_p_jar']
-                args=object.__name__, object.__bases__, dict
-                state=None
-            else:
-                if hasattr(klass, '__getinitargs__'):
-                    args = object.__getinitargs__()
-                    len(args) # XXX Assert it's a sequence
-                else:
-                    args = None # New no-constructor protocol!
-
-                module=getattr(klass,'__module__','')
-                if module: klass=module, klass.__name__
-                __traceback_info__=klass, oid, self._version
-                state=object.__getstate__()
-
-            seek(0)
-            clear_memo()
-            dump((klass,args))
-            dump(state)
-            p=file(1)
-            s=dbstore(oid,serial,p,version,transaction)
+            p = w.serialize(obj)
+            s = self._storage.store(oid, serial, p, self._version, transaction)
             self._store_count = self._store_count + 1
             # Put the object in the cache before handling the
             # response, just in case the response contains the
             # serial number for a newly created object
-            try: cache[oid]=object
+            try:
+                self._cache[oid] = obj
             except:
                 # Dang, I bet its wrapped:
-                if hasattr(object, 'aq_base'):
-                    cache[oid]=object.aq_base
+                if hasattr(obj, 'aq_base'):
+                    self._cache[oid] = obj.aq_base
                 else:
                     raise
 
@@ -466,7 +336,6 @@
         load=src.load
         store=tmp.store
         dest=self._version
-        get=self._cache.get
         oids=src._index.keys()
 
         # Copy invalidating and creating info from temporary storage:
@@ -560,7 +429,7 @@
         self.getTransaction().register(object)
 
     def root(self):
-        return self['\0\0\0\0\0\0\0\0']
+        return self[z64]
 
     def setstate(self, obj):
         oid = obj._p_oid
@@ -628,7 +497,8 @@
                 self._conflicts[obj._p_oid] = 1
                 raise ReadConflictError(object=obj)
 
-        self._set_ghost_state(obj, p, serial)
+        self._reader.setGhostState(obj, p)
+        obj._p_serial = serial
 
     def _setstate_noncurrent(self, obj):
         """Set state using non-current data.
@@ -652,21 +522,8 @@
         assert start < self._txn_time <= end, \
                (U64(start), U64(self._txn_time), U64(end))
         self._noncurrent[obj._p_oid] = True
-        self._set_ghost_state(obj, data, start)
-
-    def _set_ghost_state(self, obj, p, serial):
-        file = StringIO(p)
-        unpickler = Unpickler(file)
-        unpickler.persistent_load = self._persistent_load
-        unpickler.load()
-        state = unpickler.load()
-
-        setstate = getattr(obj, "__setstate__", None)
-        if setstate is None:
-            obj.update(state)
-        else:
-            setstate(state)
-        obj._p_serial = serial
+        self._reader.setGhostState(obj, data)
+        obj._p_serial = start
 
     def _handle_independent(self, obj):
         # Helper method for setstate() handles possibly independent objects
@@ -686,42 +543,27 @@
             self.getTransaction().register(obj)
             raise ReadConflictError(object=obj)
 
-    def oldstate(self, object, serial):
-        oid=object._p_oid
-        p = self._storage.loadSerial(oid, serial)
-        file=StringIO(p)
-        unpickler=Unpickler(file)
-        unpickler.persistent_load=self._persistent_load
-        unpickler.load()
-        return  unpickler.load()
-
-    def setklassstate(self, object):
+    def oldstate(self, obj, serial):
+        p = self._storage.loadSerial(obj._p_oid, serial)
+        return self._reader.getState(p)
+
+    def setklassstate(self, obj):
+        # Special case code to handle ZClasses, I think.
+        # Called by the cache when an object of type type is invalidated.
         try:
-            oid=object._p_oid
-            __traceback_info__=oid
+            oid = obj._p_oid
             p, serial = self._storage.load(oid, self._version)
-            file=StringIO(p)
-            unpickler=Unpickler(file)
-            unpickler.persistent_load=self._persistent_load
-
-            copy = unpickler.load()
-
-            klass, args = copy
-
-            if klass is not ExtensionKlass:
-                self._log.error(
-                    "Unexpected klass when setting class state on %s",
-                    getattr(object, "__name__", "(?)"))
-                return
-
-            copy = klass(*args)
-            object.__dict__.clear()
-            object.__dict__.update(copy.__dict__)
-
-            object._p_oid=oid
-            object._p_jar=self
-            object._p_changed=0
-            object._p_serial=serial
+
+            # We call getGhost(), but we actually get a non-ghost back.
+            # The object is a class, which can't actually be ghosted.
+            copy = self._reader.getGhost(p)
+            obj.__dict__.clear()
+            obj.__dict__.update(copy.__dict__)
+
+            obj._p_oid = oid
+            obj._p_jar = self
+            obj._p_changed = 0
+            obj._p_serial = serial
         except:
             self._log.error("setklassstate failed", exc_info=sys.exc_info())
             raise
@@ -741,7 +583,7 @@
         if sub:
             # Sub-transaction!
             if self._tmp is None:
-                _tmp = TmpStore.TmpStore(self._version)
+                _tmp = TmpStore(self._version)
                 self._tmp = self._storage
                 self._storage = _tmp
                 _tmp.registerDB(self._db, 0)
@@ -857,8 +699,3 @@
         new._p_changed=1
         self.getTransaction().register(new)
         self._cache[oid]=new
-
-class tConnection(Connection):
-
-    def close(self):
-        self._breakcr()


=== ZODB3/ZODB/ConflictResolution.py 1.19 => 1.19.2.1 ===
--- ZODB3/ZODB/ConflictResolution.py:1.19	Thu Oct  2 19:56:55 2003
+++ ZODB3/ZODB/ConflictResolution.py	Tue Dec 23 14:05:53 2003
@@ -14,6 +14,7 @@
 import sys
 from cStringIO import StringIO
 from cPickle import Unpickler, Pickler
+from pickle import PicklingError
 
 from ZODB.POSException import ConflictError
 import zLOG
@@ -47,7 +48,7 @@
         return "PR(%s %s)" % (id(self), self.data)
 
     def __getstate__(self):
-        raise "Can't pickle PersistentReference"
+        raise PicklingError, "Can't pickle PersistentReference"
 
 class PersistentReferenceFactory:
 
@@ -88,14 +89,15 @@
         file = StringIO(newpickle)
         unpickler = Unpickler(file)
         unpickler.persistent_load = prfactory.persistent_load
-        class_tuple = unpickler.load()[0]
+        meta = unpickler.load()
+        class_tuple = meta[0]
         if bad_class(class_tuple):
             return None
         newstate = unpickler.load()
         klass = load_class(class_tuple)
         if klass is None:
             return None
-        inst = klass.__basicnew__()
+        inst = klass.__new__(klass)
 
         try:
             resolve = inst._p_resolveConflict
@@ -111,7 +113,7 @@
         file = StringIO()
         pickler = Pickler(file,1)
         pickler.persistent_id = persistent_id
-        pickler.dump(class_tuple)
+        pickler.dump(meta)
         pickler.dump(resolved)
         return file.getvalue(1)
     except ConflictError:


=== ZODB3/ZODB/BaseStorage.py 1.36.2.5 => 1.36.2.6 ===
--- ZODB3/ZODB/BaseStorage.py:1.36.2.5	Tue Dec  2 02:10:30 2003
+++ ZODB3/ZODB/BaseStorage.py	Tue Dec 23 14:05:53 2003
@@ -16,9 +16,13 @@
 $Id$
 """
 import cPickle
+import threading
 import time
 
-import ThreadLock
+import UndoLogCompatible
+import POSException
+from persistent.TimeStamp import TimeStamp
+
 import zLOG
 from ZODB import bpthread
 from ZODB import POSException
@@ -37,12 +41,12 @@
                  "create storage %s" % self.__name__)
 
         # Allocate locks:
-        l=ThreadLock.allocate_lock()
-        self._lock_acquire=l.acquire
-        self._lock_release=l.release
-        l=bpthread.allocate_lock()
-        self._commit_lock_acquire=l.acquire
-        self._commit_lock_release=l.release
+        l = threading.RLock()
+        self._lock_acquire = l.acquire
+        self._lock_release = l.release
+        l = threading.Lock()
+        self._commit_lock_acquire = l.acquire
+        self._commit_lock_release = l.release
 
         t=time.time()
         t=self._ts=apply(TimeStamp,(time.gmtime(t)[:5]+(t%60,)))

=== Removed File ZODB3/ZODB/cPersistence.h ===

=== Removed File ZODB3/ZODB/cPersistence.c ===

=== Removed File ZODB3/ZODB/PersistentMapping.py ===

=== Removed File ZODB3/ZODB/PersistentList.py ===




More information about the Zodb-checkins mailing list