[Zope3-checkins] CVS: Zope3/src/zodb - __init__.py:1.2 _timestamp.c:1.2 config.py:1.2 conflict.py:1.2 connection.py:1.2 db.py:1.2 dbdump.py:1.2 export.py:1.2 interfaces.py:1.2 lockfile.py:1.2 serialize.py:1.2 timestamp.py:1.2 utils.py:1.2 winlock.c:1.2 ztransaction.py:1.2

Jim Fulton jim@zope.com
Wed, 25 Dec 2002 09:13:48 -0500


Update of /cvs-repository/Zope3/src/zodb
In directory cvs.zope.org:/tmp/cvs-serv15352/src/zodb

Added Files:
	__init__.py _timestamp.c config.py conflict.py connection.py 
	db.py dbdump.py export.py interfaces.py lockfile.py 
	serialize.py timestamp.py utils.py winlock.c ztransaction.py 
Log Message:
Grand renaming:

- Renamed most files (especially python modules) to lower case.

- Moved views and interfaces into separate hierarchies within each
  project, where each top-level directory under the zope package
  is a separate project.

- Moved everything to src from lib/python.

  lib/python will eventually go away. I need access to the cvs
  repository to make this happen, however.

There are probably some bits that are broken. All tests pass
and zope runs, but I haven't tried everything. There are a number
of cleanups I'll work on tomorrow.



=== Zope3/src/zodb/__init__.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/__init__.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,13 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################


=== Zope3/src/zodb/_timestamp.c 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/_timestamp.c	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,415 @@
+/*****************************************************************************
+
+  Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
+  
+  This software is subject to the provisions of the Zope Public License,
+  Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+  FOR A PARTICULAR PURPOSE
+  
+ ****************************************************************************/
+
+#include "Python.h"
+#include <time.h>
+
+PyObject *TimeStamp_FromDate(int, int, int, int, int, double);
+PyObject *TimeStamp_FromString(const char *);
+
+static char TimeStampModule_doc[] = 
+"A 64-bit TimeStamp used as a ZODB serial number.\n";
+
+typedef struct {
+    PyObject_HEAD
+    unsigned char data[8];
+} TimeStamp;
+
+/* The first dimension of the arrays below is non-leapyear / leapyear */
+
+static char month_len[2][12]={
+  {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, 
+  {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
+};
+
+static short joff[2][12] = {
+  {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334},
+  {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}
+};
+
+static double gmoff=0;
+
+/* XXX should this be stored in sconv? */
+#define SCONV ((double)60) / ((double)(1<<16)) / ((double)(1<<16))
+
/* Gregorian leap-year test: every 4th year, except centuries that are
   not divisible by 400. */
static int
leap(int year)
{
    if (year % 4 != 0)
        return 0;
    if (year % 100 != 0)
        return 1;
    return year % 400 == 0;
}
+
+static int
+days_in_month(int year, int month)
+{
+    return month_len[leap(year)][month];
+}
+
/* Days between Jan 1, 1901 and Jan 1 of year y (negative for earlier
   years), including Gregorian leap-day corrections. */
static double
TimeStamp_yad(int y)
{
    double base, sign;
    int n = y - 1900;

    base = (n - 1) * 365;
    if (n > 0) {
        sign = 1.0;
        n -= 1;
    }
    else {
        sign = -1.0;
        n = -n;
    }
    /* n/4 leap days, minus skipped centuries, plus 400-year exceptions
       (offset by 300 so the first counted one is the year 2000). */
    return base + sign * (n / 4 - n / 100 + (n + 300) / 400);
}
+
/* Absolute time in seconds for year y, zero-based month mo, zero-based
   day-of-month d, minute-of-day m, and second s, relative to the same
   1901-based epoch used by TimeStamp_yad(). */
static double
TimeStamp_abst(int y, int mo, int d, int m, int s)
{
    return (TimeStamp_yad(y) + joff[leap(y)][mo] + d) * 86400 + m * 60 + s;
}
+
/* Determine the platform's epoch offset by asking gmtime() how it
   labels time_t zero, and cache it in the file-global gmoff.
   Returns 0 on success, -1 (with SystemError set) on failure. */
static int
TimeStamp_init_gmoff(void)
{
    struct tm *t;
    time_t z=0;

    t = gmtime(&z);
    if (t == NULL) {
	PyErr_SetString(PyExc_SystemError, "gmtime failed");
	return -1;
    }

    /* tm_mon is zero-based; tm_mday is one-based, hence the "- 1". */
    gmoff = TimeStamp_abst(t->tm_year+1900, t->tm_mon, t->tm_mday - 1,
			   t->tm_hour * 60 + t->tm_min, t->tm_sec);

    return 0;
}
+
/* tp_dealloc: TimeStamp holds no references, so just free the object. */
static void
TimeStamp_dealloc(TimeStamp *ts)
{
    PyObject_Del(ts);
}
+
+static int
+TimeStamp_compare(TimeStamp *v, TimeStamp *w)
+{
+    int cmp = memcmp(v->data, w->data, 8);
+    if (cmp < 0) return -1;
+    if (cmp > 0) return 1;
+    return 0;
+}
+
/* tp_hash: the CPython 2.x string-hash algorithm applied to the raw
   8 bytes, so a TimeStamp hashes like the equivalent 8-byte string.
   Note the first byte seeds x AND is folded in again by the loop,
   exactly as string_hash does. */
static long
TimeStamp_hash(TimeStamp *self)
{
    register unsigned char *p = (unsigned char *)self->data;
    register int len = 8;
    register long x = *p << 7;
    /* XXX unroll loop? */
    while (--len >= 0)
	x = (1000003*x) ^ *p++;
    x ^= 8;
    if (x == -1)
	x = -2;  /* -1 signals an error to the interpreter; avoid it */
    return x;
}
+
/* Broken-out date fields: year, one-based month and day, and minute
   of the day (0..1439). */
typedef struct {
    int y, m, d, mi;
} TimeStampParts;

/* Decode bytes 0-3 of a TimeStamp -- a big-endian minute count packed
   as (((year-1900)*12 + month-1)*31 + day-1)*1440 + minute -- into its
   parts.  535680 = 12*31*1440, 44640 = 31*1440, 1440 = minutes/day. */
static void
TimeStamp_unpack(TimeStamp *self, TimeStampParts *p)
{
    unsigned long v;

    v = (self->data[0] * 16777216 + self->data[1] * 65536
	 + self->data[2] * 256 + self->data[3]);
    p->y = v / 535680 + 1900;
    p->m = (v % 535680) / 44640 + 1;
    p->d = (v % 44640) / 1440 + 1;
    p->mi = v % 1440;
}
+
/* Seconds within the minute: bytes 4-7 hold a big-endian 32-bit
   fraction of a minute, in units of 60/2**32 seconds (SCONV). */
static double
TimeStamp_sec(TimeStamp *self)
{
    unsigned int v;

    v = (self->data[4] * 16777216 + self->data[5] * 65536
	 + self->data[6] * 256 + self->data[7]);
    return SCONV * v;
}
+
/* --- No-argument accessor methods; each decodes fields on demand. --- */

static PyObject *
TimeStamp_year(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    return PyInt_FromLong(p.y);
}

static PyObject *
TimeStamp_month(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    return PyInt_FromLong(p.m);
}

static PyObject *
TimeStamp_day(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    return PyInt_FromLong(p.d);
}

static PyObject *
TimeStamp_hour(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    /* mi is the minute of the day; split into hour... */
    return PyInt_FromLong(p.mi / 60);
}

static PyObject *
TimeStamp_minute(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    /* ...and minute within the hour. */
    return PyInt_FromLong(p.mi % 60);
}

static PyObject *
TimeStamp_second(TimeStamp *self)
{
    return PyFloat_FromDouble(TimeStamp_sec(self));
}

/* Seconds since the Unix epoch, as time.time() would report it.
   NOTE(review): gmoff (computed from gmtime() at init) is subtracted,
   which presumes the stored value is UTC -- confirm with callers. */
static PyObject *
TimeStamp_timeTime(TimeStamp *self)
{
    TimeStampParts p;
    TimeStamp_unpack(self, &p);
    return PyFloat_FromDouble(TimeStamp_abst(p.y, p.m - 1, p.d - 1, p.mi, 0)
			      + TimeStamp_sec(self) - gmoff);
}

/* The raw 8 bytes as a Python string.  (data is unsigned char[]; the
   implicit conversion to const char * draws a signedness warning on
   some compilers but is harmless.) */
static PyObject *
TimeStamp_raw(TimeStamp *self)
{
    return PyString_FromStringAndSize(self->data, 8);
}
+
+static PyObject *
+TimeStamp_laterThan(TimeStamp *self, PyObject *obj)
+{
+    TimeStamp *o = NULL;
+    TimeStampParts p;
+    unsigned char new[8];
+    int i;
+
+    if (obj->ob_type != self->ob_type) {
+	PyErr_SetString(PyExc_TypeError, "expected TimeStamp object");
+	return NULL;
+    }
+    o = (TimeStamp *)obj;
+    if (memcmp(self->data, o->data, 8) > 0) {
+	Py_INCREF(self);
+	return (PyObject *)self;
+    }
+
+    memcpy(new, o->data, 8);
+    for (i = 7; i > 3; i--) {
+	if (new[i] == 255)
+	    new[i] = 0;
+	else {
+	    new[i]++;
+	    return TimeStamp_FromString(new);
+	}
+    }
+
+    /* All but the first two bytes are the same.  Need to increment
+       the year, month, and day explicitly. */
+    TimeStamp_unpack(o, &p);
+    if (p.mi >= 1439) {
+	p.mi = 0;
+	if (p.d == month_len[leap(p.y)][p.m - 1]) {
+	    p.d = 1;
+	    if (p.m == 12) {
+		p.m = 1;
+		p.y++;
+	    } else
+		p.m++;
+	} else
+	    p.d++;
+    } else
+	p.mi++;
+
+    return TimeStamp_FromDate(p.y, p.m, p.d, p.mi / 60, p.mi % 60, 0);
+}
+
/* Method table: accessors take no arguments (METH_NOARGS);
   laterThan takes exactly one argument (METH_O). */
static struct PyMethodDef TimeStamp_methods[] = {
    {"year", 	(PyCFunction)TimeStamp_year, 	METH_NOARGS},
    {"minute", 	(PyCFunction)TimeStamp_minute, 	METH_NOARGS},
    {"month", 	(PyCFunction)TimeStamp_month, 	METH_NOARGS},
    {"day", 	(PyCFunction)TimeStamp_day,	METH_NOARGS},
    {"hour", 	(PyCFunction)TimeStamp_hour, 	METH_NOARGS},
    {"second", 	(PyCFunction)TimeStamp_second, 	METH_NOARGS},
    {"timeTime",(PyCFunction)TimeStamp_timeTime, 	METH_NOARGS},
    {"laterThan", (PyCFunction)TimeStamp_laterThan, 	METH_O},
    {"raw",	(PyCFunction)TimeStamp_raw,	METH_NOARGS},
    {NULL,	NULL},
};
+
/* Type object.  ob_type and tp_getattro are patched in by
   init_timestamp(); tp_compare and tp_hash make timestamps usable as
   ordered, hashable values. */
static PyTypeObject TimeStamp_type = {
    PyObject_HEAD_INIT(NULL)
    0,
    "TimeStamp",
    sizeof(TimeStamp),
    0,
    (destructor)TimeStamp_dealloc,	/* tp_dealloc */
    0,					/* tp_print */
    0,					/* tp_getattr */
    0,					/* tp_setattr */
    (cmpfunc)TimeStamp_compare,		/* tp_compare */
    0,					/* tp_repr */
    0,					/* tp_as_number */
    0,					/* tp_as_sequence */
    0,					/* tp_as_mapping */
    (hashfunc)TimeStamp_hash,		/* tp_hash */
    0,					/* tp_call */
    0,					/* tp_str */
    0,					/* tp_getattro */
    0,					/* tp_setattro */
    0,					/* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
    0,					/* tp_doc */
    0,					/* tp_traverse */
    0,					/* tp_clear */
    0,					/* tp_richcompare */
    0,					/* tp_weaklistoffset */
    0,					/* tp_iter */
    0,					/* tp_iternext */
    TimeStamp_methods,			/* tp_methods */
    0,					/* tp_members */
    0,					/* tp_getset */
    0,					/* tp_base */
    0,					/* tp_dict */
    0,					/* tp_descr_get */
    0,					/* tp_descr_set */
};
+
+PyObject *
+TimeStamp_FromString(const char *buf)
+{
+    /* buf must be exactly 8 characters */
+    TimeStamp *ts = (TimeStamp *)PyObject_New(TimeStamp, &TimeStamp_type);
+    memcpy(ts->data, buf, 8);
+    return (PyObject *)ts;
+}
+
/* Validate that VAR lies in [LO, HI]; on failure, set ValueError and
   RETURN NULL FROM THE ENCLOSING FUNCTION.  Only usable inside a
   function returning PyObject *. */
#define CHECK_RANGE(VAR, LO, HI) if ((VAR) < (LO) || (VAR) > (HI)) { \
     return PyErr_Format(PyExc_ValueError, \
			 # VAR " must be between %d and %d: %d", \
			 (LO), (HI), (VAR)); \
    }
+
+PyObject *
+TimeStamp_FromDate(int year, int month, int day, int hour, int min, 
+		   double sec)
+{
+    TimeStamp *ts = NULL;
+    int d;
+    unsigned int v;
+
+    if (year < 1900) 
+	return PyErr_Format(PyExc_ValueError,
+			    "year must be greater than 1900: %d", year);
+    CHECK_RANGE(month, 1, 12);
+    d = days_in_month(year, month - 1);
+    if (day < 1 || day > d)
+	return PyErr_Format(PyExc_ValueError,
+			    "day must be between 1 and %d: %d", d, day);
+    CHECK_RANGE(hour, 0, 23);
+    CHECK_RANGE(min, 0, 59);
+    /* Seconds are allowed to be anything, so chill
+       If we did want to be pickly, 60 would be a better choice.
+    if (sec < 0 || sec > 59)
+	return PyErr_Format(PyExc_ValueError,
+			    "second must be between 0 and 59: %f", sec);
+    */
+    ts = (TimeStamp *)PyObject_New(TimeStamp, &TimeStamp_type);
+    v = (((year - 1900) * 12 + month - 1) * 31 + day - 1);
+    v = (v * 24 + hour) * 60 + min;
+    ts->data[0] = v / 16777216;
+    ts->data[1] = (v % 16777216) / 65536;
+    ts->data[2] = (v % 65536) / 256;
+    ts->data[3] = v % 256;
+    sec /= SCONV;
+    v = (unsigned int)sec;
+    ts->data[4] = v / 16777216;
+    ts->data[5] = (v % 16777216) / 65536;
+    ts->data[6] = (v % 65536) / 256;
+    ts->data[7] = v % 256;
+
+    return (PyObject *)ts;
+}
+
/* Module-level TimeStamp() factory.  Accepts either a single 8-byte
   string or (year, month, day[, hour[, minute[, second]]]). */
PyObject *
TimeStamp_TimeStamp(PyObject *obj, PyObject *args)
{
    char *buf = NULL;
    int len = 0, y, mo, d, h = 0, m = 0;
    double sec = 0;

    if (PyArg_ParseTuple(args, "s#:TimeStamp", &buf, &len)) {
	if (len != 8) {
	    PyErr_SetString(PyExc_ValueError, "8-character string expected");
	    return NULL;
	}
	return TimeStamp_FromString(buf);
    }
    /* Not a single string: clear the parse error and retry as a date. */
    PyErr_Clear();

    if (!PyArg_ParseTuple(args, "iii|iid", &y, &mo, &d, &h, &m, &sec))
	return NULL;
    return TimeStamp_FromDate(y, mo, d, h, m, sec);
}
+
static PyMethodDef TimeStampModule_functions[] = {
    {"TimeStamp",	TimeStamp_TimeStamp,	METH_VARARGS},
    {NULL,		NULL},
};


/* Module initialization.  Pre-2.3 style: the type's ob_type and
   tp_getattro slots are filled in by hand after module creation.
   NOTE(review): PyType_Ready() would be the usual way to finalize the
   type -- confirm the targeted Python version before changing. */
void
init_timestamp(void)
{
    PyObject *m;

    /* Fail module import if the UTC offset cannot be determined. */
    if (TimeStamp_init_gmoff() < 0)
	return;

    m = Py_InitModule4("_timestamp", TimeStampModule_functions,
		       TimeStampModule_doc, NULL, PYTHON_API_VERSION);
    if (m == NULL)
	return;

    TimeStamp_type.ob_type = &PyType_Type;
    TimeStamp_type.tp_getattro = PyObject_GenericGetAttr;
}
+ 


=== Zope3/src/zodb/config.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/config.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,190 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+"""Default storage types.
+
+Adapted from DBTab/StorageTypes.py.
+"""
+
+import re
+
+from ZConfig.Config import asBoolean
+
+
def convertFileStorageArgs(quota=None, stop=None, **kw):
    """Convert string config values into FileStorage constructor args.

    quota and stop arrive as strings (or None); stop is converted to a
    packed 64-bit id via p64.  Returns the adjusted keyword dict.
    """
    if kw.has_key('name'):
        # FileStorage doesn't accept a 'name' arg
        del kw['name']
    if quota is not None:
        # A quota of 0 means "no quota".
        kw['quota'] = long(quota) or None
    if stop is not None:
        stop = long(stop)
        if not stop:
            stop = None
        else:
            from zodb.utils import p64
            stop = p64(stop)
        kw['stop'] = stop

    # Boolean args
    for name in (
        'create', 'read_only'
        ):
        if kw.has_key(name):
            kw[name] = asBoolean(kw[name])

    return kw
+
+
# Recognize ZEO URLs of the form 'zeo://zope.example.com:1234'.
zeo_url_re = re.compile('zeo:/*(?P<host>[A-Za-z0-9\.-]+):(?P<port>[0-9]+)')

def convertAddresses(s):
    """Parse a semicolon-separated address list.

    Each non-empty entry becomes either a (host, port) tuple (for ZEO
    URLs) or a plain string naming a socket file.
    """
    res = []
    for part in s.split(';'):
        part = part.strip()
        if not part:
            continue
        mo = zeo_url_re.match(part)
        if mo is None:
            # Not a ZEO URL; treat it as a socket file path.
            res.append(part)
        else:
            host, port = mo.groups()
            res.append((host, int(port)))
    return res
+
+
def convertClientStorageArgs(addr=None, **kw):
    """Convert string config values into ClientStorage constructor args.

    addr (required) is parsed into a list of (host, port) tuples and/or
    socket paths; integer and boolean options are coerced in place.
    """
    if addr is None:
        raise RuntimeError, 'An addr parameter is required for ClientStorage.'
    kw['addr'] = convertAddresses(addr)

    # Integer args
    for name in (
        'cache_size', 'min_disconnect_poll', 'max_disconnect_poll',
        ):
        if kw.has_key(name):
            kw[name] = int(kw[name])

    # Boolean args
    for name in (
        'wait', 'read_only', 'read_only_fallback',
        ):
        if kw.has_key(name):
            kw[name] = asBoolean(kw[name])

    # The 'client' parameter must be None to be false.  Yuck.
    if kw.has_key('client') and not kw['client']:
        kw['client'] = None

    return kw
+
+
# Currently unused
def convertBDBStorageArgs(**kw):
    """Fold BerkeleyConfig-recognized options into a config object.

    Every keyword matching a public BerkeleyConfig attribute is coerced
    (bool for read_only, int for everything except logdir), moved onto
    a fresh BerkeleyConfig, and passed along as kw['config'].
    """
    from zodb.storage.base import BerkeleyConfig
    config = BerkeleyConfig()
    for name in dir(BerkeleyConfig):
        if name.startswith('_'):
            continue
        val = kw.get(name)
        if val is not None:
            if name == 'read_only':
                val = asBoolean(val)
            elif name != 'logdir':
                val = int(val)
            setattr(config, name, val)
            del kw[name]
    # XXX: Nobody ever passes in env
    assert not kw.has_key('env')
    kw['config'] = config
    return kw
+
+
storage_types = {
    # A mapping from "type" (i.e. class) to 2-tuple of (module, converter).
    # converter may be None for no conversion necessary.  type and module are
    # both strings, and module should be the dotted path for use in an
    # __import__().
    # NOTE(review): convertBDBStorageArgs is marked "Currently unused"
    # above yet is referenced here -- confirm which comment is stale.
    'FileStorage'      : ('zodb.storage.file', convertFileStorageArgs),
    'MappingStorage'   : ('zodb.storage.mapping', None),
    'ClientStorage'    : ('zodb.zeo.client', convertClientStorageArgs),
    'BDBFullStorage'   : ('zodb.storage.bdbfull', convertBDBStorageArgs),
    'BDBMinimalStorage': ('zodb.storage.bdbminimal', convertBDBStorageArgs),
    }
+
+
+"""Higher-level support for configuring storages.
+
+Storages are configured a la DBTab.
+
+A storage section has the form
+
+  <Storage Name (dependent)>
+    # For example
+    type        FileStorage
+    file_name   var/Data.fs
+    read_only   1
+  </Storage>
+
where Name and (dependent) are optional.  Once you have retrieved the
section object (probably with getSection("Storage", name)), the
function createStorage() in this module will create the storage object
for you.
+"""
+
+
+
def createStorage(section):
    """Instantiate the storage described by a configuration section."""
    factory, kwargs = getStorageInfo(section)
    return factory(**kwargs)
+
def getStorageInfo(section):
    """Extract a storage description from a configuration section.

    Return a tuple (klass, args) where klass is the storage class and
    args is a dictionary of keyword arguments.  To create the storage,
    call klass(**args).

    Adapted from DatabaseFactory.setStorageParams() in DBTab.py.
    """
    # 'type' mirrors the config key name (shadows the builtin here).
    type = section.get("type")
    if not type:
        raise RuntimeError, "A storage type is required"
    module = None
    pos = type.rfind(".")
    if pos >= 0:
        # Specified the module
        module, type = type[:pos], type[pos+1:]
    converter = None
    if not module:
        # Use a default module and argument converter.
        info = storage_types.get(type)
        if not info:
            raise RuntimeError, "Unknown storage type: %s" % type
        module, converter = info
    # The non-empty fromlist makes __import__ return the leaf module.
    m = __import__(module, {}, {}, [type])
    klass = getattr(m, type)

    args = {}
    if section.name:
        args["name"] = section.name
    # All remaining keys become constructor keywords (as strings,
    # unless the converter coerces them).
    for key in section.keys():
        if key.lower() != "type":
            args[key] = section.get(key)
    if converter is not None:
        args = converter(**args)
    return (klass, args)


=== Zope3/src/zodb/conflict.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/conflict.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,150 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+
+# It's hard to draw a clear separation between these two modules,
+# because conflict resolution depends (for efficiency and safety) on
+# working with the raw object state instead of instantiated objects.
+
+__metaclass__ = type
+
+from cStringIO import StringIO
+from cPickle import PicklingError
+import logging
+
+from transaction.interfaces import ConflictError
+from zodb.serialize import BaseObjectReader, ObjectWriter, getClassMetadata
+
+ResolvedSerial = "rs"
+
class ResolvedObjectAdapter:
    """Adapt an object's raw state to the ObjectWriter protocol.

    ObjectWriter pickles via __class__ and __getstate__().  During
    conflict resolution there is no instantiated object, only concrete
    state; this adapter impersonates the ghost's class while returning
    that state, so the resolved state can be repickled without ever
    instantiating the real object.

    This object should only be used in conjunction with ObjectWriter.
    """

    def __init__(self, ghost, state):
        self._class = ghost.__class__
        self._state = state

    def __getattribute__(self, name):
        # Masquerade as the ghost's class; resolve everything else
        # through the normal machinery.
        if name != "__class__":
            lookup = super(ResolvedObjectAdapter, self).__getattribute__
            return lookup(name)
        return self._class

    def __getstate__(self):
        return self._state
+
class PersistentReference:
    """Lightweight stand-in for a persistent reference.

    Records only the oid; no object is loaded or instantiated.
    """

    __slots__ = ("oid",)

    def __init__(self, oid):
        self.oid = oid
+
class ResolveObjectReader(BaseObjectReader):
    """Object reader specialized for conflict resolution.

    Persistent references are replaced by PersistentReference
    placeholders rather than loaded objects.
    """

    # The bad_classes attribute tracks all classes for which an
    # _p_resolveConflict() method could not be found.  It is used
    # to avoid repeating work to load classes when it is known
    # that they can't be imported or don't resolve conflicts.
    # Note: deliberately shared across instances (class attribute).
    bad_classes = {}

    def __init__(self):
        # oid -> PersistentReference, so repeated references to one oid
        # unpickle to a single shared placeholder.
        self._refs = {}

    def _persistent_load(self, oid):
        # Return the placeholder for oid, creating it on first use.
        ref = self._refs.get(oid)
        if ref is None:
            ref = self._refs[oid] = PersistentReference(oid)
        return ref

    def unresolvable(cls, klass):
        """Returns True if class does not support conflict resolution.

        The exact rules are implementation dependent.  This method was
        written to make testing easier.
        """
        meta = getClassMetadata(klass=klass)
        return meta in cls.bad_classes

    # Python 2.2-style classmethod declaration.
    unresolvable = classmethod(unresolvable)

    def getClassMetadata(self, pickle):
        # Read only the leading class-metadata record from the pickle.
        unpickler = self._get_unpickler(pickle)
        classmeta = unpickler.load()
        return classmeta

    def getResolver(self, pickle):
        # Get the conflict resolution method from a ghost rather
        # than actually instantiating the object.  _p_resolveConflict()
        # is really a static method.
        meta = self.getClassMetadata(pickle)
        if meta in self.bad_classes:
            return None
        try:
            ghost = self._new_object(*meta)
        except ImportError:
            # log failure to import?
            self.bad_classes[meta] = True
            return None
        if ghost is None:
            return None
        resolve = getattr(ghost, "_p_resolveConflict", None)
        if resolve is None:
            # Class doesn't do conflict resolution; remember that.
            self.bad_classes[meta] = True
            return None
        else:
            return resolve
+
class ConflictResolvingStorage:
    "Mix-in class that provides conflict resolution handling for storages"

    def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
                             committedData=None):
        """Attempt application-level resolution of a write conflict.

        Returns the repickled resolved state, or None whenever the
        conflict cannot be resolved (no resolver, unloadable state, or
        the resolver raising ConflictError).
        """
        reader = ResolveObjectReader()
        resolve = reader.getResolver(newpickle)
        if resolve is None:
            return None
        newstate = reader.getState(newpickle)

        # State this transaction started from.
        p = self.loadSerial(oid, oldSerial)
        try:
            old = reader.getState(p)
        except (EOFError, PicklingError), err:
            logging.warn("CR: Error loading object: %s", err)
            return None
        # State written by the conflicting (committed) transaction.
        if committedData is None:
            committedData = self.loadSerial(oid, committedSerial)
        try:
            committed = reader.getState(committedData)
        except (EOFError, PicklingError), err:
            logging.warn("CR: Error loading object: %s", err)
            return None
        try:
            resolved = resolve(old, committed, newstate)
        except ConflictError:
            return None

        # Repickle the resolved state under the ghost's class metadata.
        writer = ObjectWriter()
        obj = ResolvedObjectAdapter(resolve.im_self, resolved)
        return writer.getState(obj)


=== Zope3/src/zodb/connection.py 1.1 => 1.2 === (567/667 lines abridged)
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/connection.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,664 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+from zodb import interfaces
+from zodb.utils import p64, u64, Set, z64
+
+import tempfile
+
class TmpStore:
    """A storage to support savepoints."""

    # Default base version; overridden per-instance when base_version
    # is passed to the constructor.
    _bver = ''

    def __init__(self, base_version):
        self._transaction = None
        if base_version:
            self._bver = base_version
        # All savepoint data is buffered in an anonymous temp file.
        self._file = tempfile.TemporaryFile()
        # _pos: current file position
        # _tpos: file position at last commit point
        self._pos = self._tpos = 0
        # _index: map oid to pos of last committed version
        self._index = {}
        # _tindex: map oid to pos for new updates
        self._tindex = {}
        # oids of objects first created in this store
        self._created = Set()
        # NOTE(review): left None here; presumably assigned by the
        # owning connection before getName() is used -- confirm.
        self._db = None

    def close(self):
        # XXX Is this necessary?
        self._file.close()

    def getName(self):
        # Delegate to the real database's name.
        return self._db.getName()

    def getSize(self):
        # Size == temp-file space consumed so far.
        return self._pos

[-=- -=- -=- 567 lines omitted -=- -=- -=-]

+        # (self._invalidate_modified) while it still has its
+        # lock.  We don't want another thread to be able to read any
+        # updated data until we've had a chance to send an
+        # invalidation message to all of the other connections!
+
+        self._db.begin_invalidation()
+        # XXX We should really have a try/finally because the begin
+        # call acquired a lock that will only be released in
+        # _invalidate_modified().
+        self._storage.tpc_finish(txn, self._invalidate_modified)
+        try:
+            del self._txns[txn]
+        except KeyError:
+            pass
+
+        self._flush_invalidations()
+
    def _invalidate_modified(self):
        # Callback passed to the storage's tpc_finish(): propagate
        # invalidations for every object modified by this transaction,
        # then release the invalidation lock taken by
        # self._db.begin_invalidation().
        for oid in self._modified:
            self._db.invalidate(oid, self)
        self._db.finish_invalidation()
+
    def sync(self):
        """Abort the current transaction and process pending invalidations.

        If the underlying storage has a sync() method of its own (e.g.
        a network client storage), let it run first.
        """
        # XXX Is it safe to abort things right now?
        get_transaction().abort()
        sync = getattr(self._storage, 'sync', None)
        if sync is not None:
            sync()
        self._flush_invalidations()
+
class Rollback:
    """Rollback changes associated with savepoint"""

    # In order to rollback changes for a savepoint(), we must remove
    # the logged changes from the TmpStore and invalidate any object
    # that has been changed since the rolledback transaction started.

    # XXX Should it be possible to rollback() to the same savepoint
    # more than once?

    def __init__(self, conn, tmp_undo):
        self._conn = conn
        self._tmp_undo = tmp_undo # undo info from the storage

    def rollback(self):
        """Undo the changes logged since the savepoint was taken.

        Raises RollbackError if the savepoint is no longer current.
        """
        if not self._tmp_undo.current(self._conn._storage):
            msg = "savepoint has already been committed"
            # NOTE(review): RollbackError is not defined or imported in
            # the code visible here -- confirm it is in scope (e.g.
            # interfaces.RollbackError), else this raise will NameError.
            raise RollbackError(msg)
        self._tmp_undo.rollback()
        # Drop cached copies of everything modified since the savepoint
        # so they reload from the rolled-back state.
        self._conn._cache.invalidateMany(self._conn._modified)


=== Zope3/src/zodb/db.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/db.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,390 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Database objects
+
+$Id$
+"""
+
+__metaclass__ = type
+
+import cPickle, cStringIO, sys
+from threading import Lock
+from time import time, ctime
+from types import StringType
+import logging
+
+from zodb.interfaces import StorageError
+from zodb.connection import Connection
+from zodb.serialize import getDBRoot
+from zodb.ztransaction import Transaction
+from zodb.utils import z64
+
+from transaction import get_transaction
+from transaction.interfaces import IDataManager
+
+class DB:
+    """The Object Database
+
+    The Object database coordinates access to and interaction of one
+    or more connections, which manage object spaces.  Most of the actual work
+    of managing objects is done by the connections.
+    """
+    def __init__(self, storage,
+                 pool_size=7,
+                 cache_size=400,
+                 ):
+        """Create an object database.
+
+        The storage for the object database must be passed in.
+        Optional arguments are:
+
+        pool_size -- The size of the pool of object spaces.
+
+        """
+
+        # Allocate locks:
+        l=Lock()
+        self._a=l.acquire
+        self._r=l.release
+
+        # Setup connection pools and cache info
+        self._pool = []
+        self._allocated = []
+        self._pool_lock = Lock()
+        self._pool_lock.acquire()
+        self._temps = []
+        self._pool_size = pool_size
+        self._cache_size = cache_size
+
+        # Setup storage
+        self._storage = storage
+        storage.registerDB(self)
+        try:
+            storage.load(z64, "")
+        except KeyError:
+            # Create the database's root in the storage if it doesn't exist
+            t = Transaction(description="initial database creation")
+            storage.tpc_begin(t)
+            storage.store(z64, None, getDBRoot(), '', t)
+            storage.tpc_vote(t)
+            storage.tpc_finish(t)
+
+        # Pass through methods:
+        for m in ('history', 'supportsVersions', 'undoInfo', 'versionEmpty',
+                  'versions', 'modifiedInVersion', 'versionEmpty'):
+            setattr(self, m, getattr(storage, m))
+
+    def _closeConnection(self, connection):
+        """Return a connection to the pool"""
+        self._a()
+        try:
+            version = connection._version
+            self._allocated.remove(connection)
+            self._pool.append(connection)
+            if len(self._pool) == 1:
+                # Pool now usable again, unlock it.
+                self._pool_lock.release()
+        finally: self._r()
+
+    def _connectionMap(self, f):
+        self._a()
+        try:
+            map(f, self._allocated)
+
+            # XXX I don't understand what this code is trying to do
+            if self._temps:
+                for cc in self._temps:
+                    if sys.getrefcount(cc) > 3:
+                        f(cc)
+                self._temps = []
+        finally: self._r()
+
+    def abortVersion(self, version):
+        AbortVersion(self, version)
+
+    # XXX I don't think the cache should be used via _cache.
+    # Not sure that both full sweep and minimize need to stay.
+
+    def cacheFullSweep(self):
+        self._connectionMap(lambda c: c._cache.full_sweep())
+
+    def cacheMinimize(self):
+        self._connectionMap(lambda c: c._cache.minimize())
+
+    def close(self):
+        self._storage.close()
+
+    def commitVersion(self, source, destination=''):
+        CommitVersion(self, source, destination)
+
+    def getCacheSize(self):
+        return self._cache_size
+
+    def getName(self):
+        return self._storage.getName()
+
+    def getPoolSize(self):
+        return self._pool_size
+
+    def begin_invalidation(self):
+        # Must be called before first call to invalidate and before
+        # the storage lock is held.
+        self._a()
+
+    def finish_invalidation(self):
+        # Must be called after begin_invalidation() and after final
+        # invalidate() call.
+        self._r()
+
+    def invalidate(self, oid, connection=None, version=''):
+        """Invalidate references to a given oid.
+
+        This is used to indicate that one of the connections has committed a
+        change to the object.  The connection committing the change should be
+        passed in to prevent useless (but harmless) messages to the
+        connection.
+        """
+        assert oid is not None
+        if connection is not None:
+            assert version == connection._version
+            version = connection._version
+
+        # Notify connections
+        for cc in self._allocated:
+            if cc is not connection:
+                self.invalidateConnection(cc, oid, version)
+
+        if self._temps:
+            # t accumulates all the connections that aren't closed.
+            t = []
+            for cc in self._temps:
+                if cc is not connection:
+                    self.invalidateConnection(cc, oid, version,
+                                              t.append)
+            self._temps = t
+
+    def invalidateConnection(self, conn, oid, version, alive=None):
+        """Send invalidation message to conn for oid on version.
+
+        If the modification occurred on a version, an invalidation is
+        sent only if the version of the mod matches the version of the
+        connection.
+
+        This function also handles garbage collection of connections
+        that aren't used anymore.  If the optional argument alive is
+        defined, it is a function that is called for all connections
+        that aren't garbage collected.
+        """
+
+        # XXX use weakrefs instead of refcounts?
+        if sys.getrefcount(conn) <= 3:
+            conn.close()
+        else:
+            if alive is not None:
+                alive(conn)
+        if not version or conn.getVersion() == version:
+            conn.invalidate(oid)
+
+    def open(self, version='', transaction=None, temporary=0, force=None,
+             waitflag=1):
+        """Return an object space (AKA connection) to work in
+
+        The optional version argument can be used to specify that a
+        version connection is desired.
+
+        The optional transaction argument can be provided to cause the
+        connection to be automatically closed when a transaction is
+        terminated.  In addition, connections per transaction are
+        reused, if possible.
+
+        Note that the connection pool is managed as a stack, to increase the
+        likelihood that the connection's stack will include useful objects.
+        """
+        self._a()
+        try:
+
+            if transaction is not None:
+                connections=transaction._connections
+                if connections:
+                    v = connections.get(version)
+                    if not (v is None or temporary):
+                        return v
+                else:
+                    transaction._connections = connections = {}
+                transaction = transaction._connections
+
+            if temporary:
+                # This is a temporary connection.
+                # We won't bother with the pools.  This will be
+                # a one-use connection.
+                c = Connection(self, version, cache_size=self._cache_size)
+                self._temps.append(c)
+                if transaction is not None:
+                    transaction[id(c)] = c
+                return c
+
+            # Pool locks are tricky.  Basically, the lock needs to be
+            # set whenever the pool becomes empty so that threads are
+            # forced to wait until the pool gets a connection in it.
+            # The lock is acquired when the (empty) pool is
+            # created.  The lock is acquired just prior to removing
+            # the last connection from the pool and just after adding
+            # a connection to an empty pool.
+
+            if not self._pool:
+                c = None
+                if self._pool_size > len(self._pool) or force:
+                    c = Connection(self, version, cache_size=self._cache_size)
+                    self._pool.append(c)
+
+                if c is None:
+                    if waitflag:
+                        self._r()
+                        self._pool_lock.acquire()
+                        self._a()
+                        if len(self._pool) > 1:
+                            # Note that the pool size will normally be 1 here,
+                            # but it could be higher due to a race condition.
+                            self._pool_lock.release()
+                    else:
+                        return
+            elif len(self._pool) == 1:
+                # Taking last one, lock the pool
+                # Note that another thread might grab the lock
+                # before us, so we might actually block, however,
+                # when we get the lock back, there *will* be a
+                # connection in the pool.
+                self._r()
+                self._pool_lock.acquire()
+                self._a()
+                if len(self._pool) > 1:
+                    # Note that the pool size will normally be 1 here,
+                    # but it could be higher due to a race condition.
+                    self._pool_lock.release()
+
+            # XXX Could look for a connection with the right version
+            c = self._pool.pop()
+            c.reset(version)
+            self._allocated.append(c)
+            for other_conn in self._pool:
+                other_conn.cacheGC()
+
+            if transaction is not None:
+                transaction[version] = c
+            return c
+
+        finally: self._r()
+
+    def pack(self, t=None, days=0):
+        if t is None:
+            t = time()
+        t -= days * 86400
+        try:
+            self._storage.pack(t)
+        except:
+            logging.getLogger("zodb").exception("packing")
+            raise
+
+    def setCacheSize(self, v):
+        self._cache_size = v
+        for c in self._pool:
+            c._cache.cache_size = v
+
+    def setPoolSize(self, v):
+        self._pool_size = v
+
+    def undo(self, id):
+        TransactionalUndo(self, id)
+
+class SimpleDataManager:
+
+    __implements__ = IDataManager
+
+    def __init__(self, db):
+        self._db = db
+        self._storage = db._storage
+        get_transaction().join(self)
+
+    def prepare(self, txn):
+        self._storage.tpc_begin(txn)
+        try:
+            self._prepare(txn)
+            self._storage.tpc_vote(txn)
+        except StorageError, err:
+            logging.getLogger("DB").info("Error during prepare: %s", err)
+            return False
+        else:
+            return True
+
+    def abort(self, txn):
+        self._storage.tpc_abort(txn)
+
+    def commit(self, txn):
+        self._storage.tpc_finish(txn)
+
+    def _prepare(self, txn):
+        # Hook for clients to perform action during 2PC
+        pass
+
+class CommitVersion(SimpleDataManager):
+    """An object that will see to version commit."""
+
+    def __init__(self, db, version, dest=''):
+        super(CommitVersion, self).__init__(db)
+        self._version = version
+        self._dest = dest
+
+    def _prepare(self, txn):
+        self._oids = self._storage.commitVersion(self._version, self._dest,
+                                                 txn)
+
+    def commit(self, txn):
+        super(CommitVersion, self).commit(txn)
+        for oid in self._oids:
+            self._db.invalidate(oid, version=self._dest)
+        if self._dest:
+            # the code above just invalidated the dest version.
+            # now we need to invalidate the source!
+            for oid in self._oids:
+                self._db.invalidate(oid, version=self._version)
+
+class AbortVersion(SimpleDataManager):
+    """An object that will see to version abortion."""
+
+    def __init__(self, db, version):
+        super(AbortVersion, self).__init__(db)
+        self._version = version
+
+    def _prepare(self, txn):
+        self._oids = self._storage.abortVersion(self._version, txn)
+
+    def commit(self, txn):
+        super(AbortVersion, self).commit(txn)
+        for oid in self._oids:
+            self._db.invalidate(oid, version=self._version)
+
+class TransactionalUndo(SimpleDataManager):
+    """An object that will see to transactional undo."""
+
+    def __init__(self, db, tid):
+        super(TransactionalUndo, self).__init__(db)
+        self._tid = tid
+
+    def _prepare(self, txn):
+        self._oids = self._storage.transactionalUndo(self._tid, txn)
+
+    def commit(self, txn):
+        super(TransactionalUndo, self).commit(txn)
+        for oid in self._oids:
+            self._db.invalidate(oid)


=== Zope3/src/zodb/dbdump.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/dbdump.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,64 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Provide a text dump of a storage based on a storage iterator."""
+
+from zodb.storage.file import FileIterator
+from zodb.timestamp import TimeStamp
+from zodb.utils import u64
+from zodb.storage.tests.base import zodb_unpickle
+from zodb.serialize import SimpleObjectReader
+
+import md5
+import time
+
+def dbdump(iter, outp=None, with_offset=True):
+    i = 0
+    for trans in iter:
+        t = TimeStamp(trans.tid).timeTime()
+        # special case just for FileStorage
+        if with_offset and hasattr(trans, "_pos"):
+            print >> outp, "Trans #%05d tid=%016x time=%s offset=%d" % \
+                  (i, u64(trans.tid), time.ctime(t), trans._pos)
+        else:
+            print >> outp, "Trans #%05d tid=%016x time=%s" % \
+                  (i, u64(trans.tid), time.ctime(t))
+        print >> outp, "\tstatus=%s user=%s description=%s" % \
+              (`trans.status`, trans.user, trans.description)
+        j = 0
+        for rec in trans:
+            if rec.data is None:
+                fullclass = "undo or abort of object creation"
+            else:
+                # Any object reader will do
+                reader = SimpleObjectReader()
+                fullclass = reader.getClassName(rec.data)
+                dig = md5.new(rec.data).hexdigest()
+            # special case for testing purposes
+            if fullclass == "zodb.tests.minpo.MinPO":
+                obj = zodb_unpickle(rec.data)
+                fullclass = "%s %s" % (fullclass, obj.value)
+            if rec.version:
+                version = "version=%s " % rec.version
+            else:
+                version = ''
+            print >> outp, "  data #%05d oid=%016x %sclass=%s" % \
+                  (j, u64(rec.oid), version, fullclass)
+            j += 1
+        print >> outp
+        i += 1
+    iter.close()
+
+def fsdump(path, outp=None, with_offset=True):
+    iter = FileIterator(path)
+    dbdump(iter, outp, with_offset)


=== Zope3/src/zodb/export.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/export.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,131 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Support for database export and import."""
+
+from zodb.interfaces import ExportError
+from zodb.utils import p64, u64, Set
+from zodb.serialize import findrefs, ObjectCopier
+from transaction import get_transaction
+
+from cStringIO import StringIO
+from cPickle import Pickler, Unpickler
+from tempfile import TemporaryFile
+from types import StringType, TupleType
+
+export_end_marker = '\377' * 16
+
+class ExportImport:
+    # a mixin for use with ZODB.Connection.Connection
+
+    __hooks = None
+
+    def exportFile(self, oid, file=None):
+        if file is None:
+            file = TemporaryFile()
+        elif isinstance(file, StringType):
+            file = open(file, 'w+b')
+        file.write('ZEXP')
+        oids = [oid]
+        done_oids = Set()
+        while oids:
+            oid = oids.pop(0)
+            if oid in done_oids:
+                continue
+            done_oids.add(oid)
+            try:
+                p, serial = self._storage.load(oid, self._version)
+            except:
+                # XXX what exception is expected?
+                pass # Ick, a broken reference
+            else:
+                oids += findrefs(p)
+                file.write(oid)
+                file.write(p64(len(p)))
+                file.write(p)
+        file.write(export_end_marker)
+        return file
+
+    def importFile(self, file, clue=None, customImporters=None):
+        # This is tricky, because we need to work in a transaction!
+        # XXX I think this needs to work in a transaction, because it
+        # needs to write pickles into the storage, which only allows
+        # store() calls between tpc_begin() and tpc_vote().
+
+        if isinstance(file, StringType):
+            file = open(file,'rb')
+        magic = file.read(4)
+
+        if magic != 'ZEXP':
+            if customImporters is not None and customImporters.has_key(magic):
+                file.seek(0)
+                return customImporters[magic](self, file, clue)
+            raise ExportError("Invalid export header")
+
+        t = get_transaction()
+        if clue is not None:
+            t.note(clue)
+
+        L = []
+        if self.__hooks is None:
+            self.__hooks = []
+        self.__hooks.append((file, L))
+        t.join(self)
+        t.savepoint()
+        # Return the root imported object.
+        if L:
+            return self[L[0]]
+        else:
+            return None
+
+    def importHook(self, txn):
+        if self.__hooks is None:
+            return
+        for file, L in self.__hooks:
+            self._importDuringCommit(txn, file, L)
+        del self.__hooks
+
+    def _importDuringCommit(self, txn, file, return_oid_list):
+        """Invoked by the transaction manager mid commit.
+
+        Appends one item, the OID of the first object created,
+        to return_oid_list.
+        """
+        copier = ObjectCopier(self, self._storage, self._created)
+
+        while 1:
+            h = file.read(16)
+            if h == export_end_marker:
+                break
+            if len(h) != 16:
+                raise ExportError("Truncated export file")
+            l = u64(h[8:16])
+            p = file.read(l)
+            if len(p) != l:
+                raise ExportError("Truncated export file")
+
+            # XXX I think it would be better if copier.copy()
+            # returned an oid and a new pickle so that this logic
+            # wasn't smeared across to modules.
+            oid = h[:8]
+            new_ref = copier.oids.get(oid)
+            if new_ref is None:
+                new_oid = self._storage.new_oid()
+                copier.oids[oid] = new_oid, None
+                return_oid_list.append(new_oid)
+                self._created.add(new_oid)
+            else:
+                new_oid = new_ref[0]
+
+            new = copier.copy(p)
+            self._storage.store(new_oid, None, new, self._version, txn)


=== Zope3/src/zodb/interfaces.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/interfaces.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,289 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""ZODB-defined exceptions
+
+$Id$
+"""
+
+from transaction.interfaces \
+     import TransactionError, RollbackError, ConflictError as _ConflictError
+
+from types import StringType, DictType
+import zodb.utils
+
+def _fmt_oid(oid):
+    return "%016x" % zodb.utils.u64(oid)
+
+def _fmt_undo(oid, reason):
+    s = reason and (": %s" % reason) or ""
+    return "Undo error %s%s" % (_fmt_oid(oid), s)
+
+class POSError(StandardError):
+    """Persistent object system error."""
+
+class POSKeyError(KeyError, POSError):
+    """Key not found in database."""
+
+    def __str__(self):
+        return _fmt_oid(self.args[0])
+
+class ConflictError(_ConflictError):
+    """Two transactions tried to modify the same object at once.
+
+    This transaction should be resubmitted.
+
+    Instance attributes:
+      oid : string
+        the OID (8-byte packed string) of the object in conflict
+      class_name : string
+        the fully-qualified name of that object's class
+      message : string
+        a human-readable explanation of the error
+      serials : (string, string)
+        a pair of 8-byte packed strings; these are the serial numbers
+        (old and new) of the object in conflict.  (Serial numbers are
+        closely related [equal?] to transaction IDs; a ConflictError may
+        be triggered by a serial number mismatch.)
+
+    The caller should pass either object or oid as a keyword argument,
+    but not both of them.  If object is passed, it should be a
+    persistent object with an _p_oid attribute.
+    """
+
+    def __init__(self, message=None, object=None, oid=None, serials=None):
+        if message is None:
+            self.message = "database conflict error"
+        else:
+            self.message = message
+
+        if object is None:
+            self.oid = None
+            self.class_name = None
+        else:
+            self.oid = object._p_oid
+            klass = object.__class__
+            self.class_name = klass.__module__ + "." + klass.__name__
+
+        if oid is not None:
+            assert self.oid is None
+            self.oid = oid
+
+        self.serials = serials
+
+    def __str__(self):
+        extras = []
+        if self.oid:
+            extras.append("oid %s" % _fmt_oid(self.oid))
+        if self.class_name:
+            extras.append("class %s" % self.class_name)
+        if self.serials:
+            extras.append("serial was %s, now %s" %
+                          tuple(map(_fmt_oid, self.serials)))
+        if extras:
+            return "%s (%s)" % (self.message, ", ".join(extras))
+        else:
+            return self.message
+
+    def get_oid(self):
+        return self.oid
+
+    def get_class_name(self):
+        return self.class_name
+
+    def get_old_serial(self):
+        return self.serials[0]
+
+    def get_new_serial(self):
+        return self.serials[1]
+
+    def get_serials(self):
+        return self.serials
+
+class ReadConflictError(ConflictError):
+    """Conflict detected when object was loaded.
+
+    An attempt was made to read an object that has changed in another
+    transaction (eg. another thread or process).
+    """
+    def __init__(self, message=None, object=None, serials=None):
+        if message is None:
+            message = "database read conflict error"
+        ConflictError.__init__(self, message=message, object=object,
+                               serials=serials)
+
+class DanglingReferenceError(TransactionError):
+    """An object has a persistent reference to a missing object.
+
+    If an object is stored and it has a reference to another object
+    that does not exist (for example, it was deleted by pack), this
+    exception may be raised.  Whether a storage supports this feature,
+    is a quality-of-implementation issue.
+
+    Instance attributes:
+    referer: oid of the object being written
+    missing: referenced oid that does not have a corresponding object
+    """
+
+    def __init__(self, Aoid, Boid):
+        self.referer = Aoid
+        self.missing = Boid
+
+    def __str__(self):
+        return "from %s to %s" % (_fmt_oid(self.referer),
+                                  _fmt_oid(self.missing))
+
+class VersionError(POSError):
+    """An error in handling versions occurred."""
+
+class VersionCommitError(VersionError):
+    """An invalid combination of versions was used in a version commit."""
+
+class VersionLockError(VersionError, TransactionError):
+    """Can't modify an object that is modified in unsaved version."""
+
+    def __init__(self, oid, version):
+        self.oid = oid
+        self.version = version
+
+    def __str__(self):
+        return "%s locked in version %r" % (_fmt_oid(self.oid),
+                                            self.version)
+
+class UndoError(POSError):
+    """An attempt was made to undo a non-undoable transaction."""
+
+    def __init__(self, oid, reason=None):
+        self._oid = oid
+        self._reason = reason
+
+    def __str__(self):
+        return _fmt_undo(self._oid, self._reason)
+
+class MultipleUndoErrors(UndoError):
+    """Several undo errors occurred during a single transaction."""
+
+    def __init__(self, errs):
+        # provide an oid and reason for clients that only look at that
+        UndoError.__init__(self, *errs[0])
+        self._errs = errs
+
+    def __str__(self):
+        return "\n".join([_fmt_undo(*pair) for pair in self._errs])
+
+class StorageError(POSError):
+    """Base class for storage based exceptions."""
+
+class StorageTransactionError(StorageError):
+    """An operation was invoked for an invalid transaction or state."""
+
+class StorageSystemError(StorageError):
+    """Panic! Internal storage error!"""
+
+class MountedStorageError(StorageError):
+    """Unable to access mounted storage."""
+
+class ReadOnlyError(StorageError):
+    """Unable to modify objects in a read-only storage."""
+
+class TransactionTooLargeError(StorageTransactionError):
+    """The transaction exhausted some finite storage resource."""
+
+class ExportError(POSError):
+    """An export file doesn't have the right format."""
+
+class Unsupported(POSError):
+    """A feature that is unsupported by the storage was used."""
+
+class InvalidObjectReference(POSError):
+    """An object contains an invalid reference to another object.
+
+    An invalid reference may be one of:
+
+    o A reference to a wrapped persistent object.
+
+    o A reference to an object in a different database connection.
+    """
+
+
+from zope.interface import Interface
+from zope.interface import Attribute
+
+from transaction.interfaces import ITransaction as _ITransaction
+
+class IConnection(Interface):
+    """Interface required of Connection by ZODB DB.
+
+    The Connection also implements IPersistentDataManager.
+    """
+
+    def reset(version):
+        """Reset connection to use specified version."""
+
+    def getVersion():
+        """Return the version that connection is using."""
+
+    def invalidate(oid):
+        """Invalidate a particular oid
+
+        This marks the oid as invalid, but doesn't actually invalidate
+        it.  The object data will be actually invalidated at certain
+        transaction boundaries.
+
+        XXX The code suggests that invalidate() may sometimes be
+        called with None as the oid, which would mean the "entire
+        transaction" is invalidated.
+        """
+
+    def close():
+        pass
+
+    def cacheGC():
+        pass
+
+class ITransaction(_ITransaction):
+    """Extends base ITransaction with metadata.
+
+    Client code should use this interface to set attributes.
+    """
+
+    def note(text):
+        """Add the text to the transaction description
+
+        If the previous description isn't empty, a blank line is
+        added before the new text.
+        """
+
+    def setUser(user_name, path="/"):
+        """Set the transaction user name.
+
+        The user is actually set to the path followed by a space and
+        the user name.
+        """
+
+    def setExtendedInfo(name, value):
+        """Set extended information."""
+
+class ITransactionAttrs(_ITransaction):
+    # XXX The following attributes used by storages, so they are part
+    # of the interface.  But I'd rather not have user code explicitly
+    # use the attributes.
+
+    user = Attribute("The user as set by setUser()")
+
+    description = Attribute("A description as set by note()")
+
+    _extension = Attribute(
+        """Extended info as set by setExtendedInfo()
+
+        Should be None or a dictionary.""")


=== Zope3/src/zodb/lockfile.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/lockfile.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,55 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE
+#
+##############################################################################
+
+from zodb.interfaces import StorageSystemError
+
+# Try to create a function that creates Unix file locks.
+try:
+    import fcntl
+
+    lock_file_FLAG = fcntl.LOCK_EX | fcntl.LOCK_NB
+
+    def lock_file(file):
+        try:
+            un = file.fileno()
+        except:
+            return # don't care if not a real file
+
+        try:
+            fcntl.flock(un, lock_file_FLAG)
+        except:
+            raise StorageSystemError, (
+                "Could not lock the database file.  There must be\n"
+                "another process that has opened the file.\n")
+
+except:
+    # Try windows-specific code:
+    try:
+        from zodb.winlock import LockFile
+        def lock_file(file):
+            try:
+                un=file.fileno()
+            except:
+                return # don't care if not a real file
+
+            try:
+                LockFile(un, 0, 0, 1, 0) # just lock the first byte, who cares
+            except:
+                raise StorageSystemError, (
+                    "Could not lock the database file.  There must be\n"
+                    "another process that has opened the file.\n")
+    except:
+        import logging
+        def lock_file(file):
+            logging.warn("FS: No file-locking support on this platform")


=== Zope3/src/zodb/serialize.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:47 2002
+++ Zope3/src/zodb/serialize.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,319 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Support for ZODB object serialization.
+
+ZODB serializes objects using a custom format based on Python pickles.
+When an object is unserialized, it can be loaded as either a ghost or
+a real object.  A ghost is a persistent object of the appropriate type
+but without any state.  The first time a ghost is accessed, the
+persistence machinery traps access and loads the actual state.  A
+ghost allows many persistent objects to be loaded while minimizing the
+memory consumption of referenced but otherwise unused objects.
+
+Object introspection
+--------------------
+
+XXX Need to define what properties an object must have to be usable
+with the ObjectWriter.  Should document how it determines what the
+class and state of an object are.
+
+Pickle format
+-------------
+
+ZODB pickles objects using a custom format.  Each object pickle had
+two parts: the class description and the object state.  The class
+description must provide enough information to call the class's
+``__new__`` and create an empty object.  Once the object exists, its
+state is passed to ``__setstate__``.
+
+The class metadata is a three-tuple containing the module name, the
+class name, and a tuple of arguments to pass to ``__new__``.  The last
+element may be None if the only argument to ``__new__`` is the class.
+
+Persistent references
+---------------------
+A persistent reference is a pair containing an oid and class metadata.
+XXX Need to write more about when they are used and when plain oids
+are used.
+"""
+
+__metaclass__ = type
+
+from cStringIO import StringIO
+import cPickle
+from types import StringType, TupleType
+import logging
+
+def getClass(module, name):
+    mod = __import__(module)
+    parts = module.split(".")
+    for part in parts[1:]:
+        mod = getattr(mod, part)
+    return getattr(mod, name)
+
+def getClassMetadata(obj=None, klass=None):
+    if klass is None:
+        klass = obj.__class__
+    module = klass.__module__
+    classname = klass.__name__
+    # XXX what if obj is None and we were passed klass?
+    if hasattr(obj, "__getnewargs__"):
+        newargs = obj.__getnewargs__()
+    else:
+        newargs = None
+    return module, classname, newargs
+
+class RootJar:
+    def new_oid(self):
+        return "\0" * 8
+
+def getDBRoot():
+    """Return a serialized database root object."""
+    # Need for the initial bootstrap
+    writer = ObjectWriter(RootJar())
+    from persistence.dict import PersistentDict
+    root = PersistentDict()
+    return writer.getState(root)
+
+class ObjectWriter:
+
+    def __init__(self, jar=None):
+        self._file = StringIO()
+        self._p = cPickle.Pickler(self._file, 1)
+        self._p.persistent_id = self._persistent_id
+        self._stack = []
+        if jar is not None:
+            assert hasattr(jar, "new_oid")
+        self._jar = jar
+
+    def _persistent_id(self, obj):
+        """Test if an object is persistent, returning an oid if it is.
+
+        This function is used by the pickler to test whether an object
+        is persistent.  If it isn't, the function returns None and the
+        object is included in the pickle for the current persistent
+        object.
+
+        If it is persistent, it returns the oid and sometimes a tuple
+        with other stuff.
+        """
+        if not hasattr(obj, '_p_oid'):
+            return None
+
+        oid = obj._p_oid
+
+        # I'd like to write something like this --
+        # if isinstance(oid, types.MemberDescriptor):
+        # -- but I can't because the type doesn't have a canonical name.
+        # Instead, we'll assert that an oid must always be a string
+        if not (oid is None or isinstance(oid, StringType)):
+            # XXX log a warning
+            return None
+
+        if oid is None or obj._p_jar is not self._jar:
+            # XXX log warning if obj._p_jar is not self
+            oid = self._jar.new_oid()
+            obj._p_jar = self._jar
+            obj._p_oid = oid
+            self._stack.append(obj)
+
+        return oid, getClassMetadata(obj)
+
+    def newObjects(self, obj):
+        # The modified object is also a "new" object.
+        # XXX Should only call newObjects() once per Pickler.
+        self._stack.append(obj)
+        return NewObjectIterator(self._stack)
+
+    def getState(self, obj):
+        return self._dump(getClassMetadata(obj), obj.__getstate__())
+
+    def _dump(self, classmeta, state):
+        self._file.reset()
+        self._p.clear_memo()
+        self._p.dump(classmeta)
+        self._p.dump(state)
+        return self._file.getvalue()
+
+class NewObjectIterator:
+
+    # The pickler is used as a forward iterator when the connection
+    # is looking for new objects to pickle.
+
+    def __init__(self, stack):
+        self._stack = stack
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        if self._stack:
+            elt = self._stack.pop()
+            return elt
+        else:
+            raise StopIteration
+
+class BaseObjectReader:
+
+    def _persistent_load(self, oid):
+        # subclasses must define _persistent_load().
+        raise NotImplementedError
+
+    def _get_unpickler(self, pickle):
+        file = StringIO(pickle)
+        unpickler = cPickle.Unpickler(file)
+        unpickler.persistent_load = self._persistent_load
+        return unpickler
+
+    def _new_object(self, module, classname, newargs=None):
+        klass = getClass(module, classname)
+        if newargs is None:
+            obj = klass.__new__(klass)
+        else:
+            obj = klass.__new__(klass, *newargs)
+
+        return obj
+
+    def getClassName(self, pickle):
+        unpickler = self._get_unpickler(pickle)
+        module, classname, newargs = unpickler.load()
+        return "%s.%s" % (module, classname)
+
+    def getGhost(self, pickle):
+        unpickler = self._get_unpickler(pickle)
+        module, classname, newargs = unpickler.load()
+        return self._new_object(module, classname, newargs)
+
+    def getState(self, pickle):
+        unpickler = self._get_unpickler(pickle)
+        unpickler.load() # skip the class metadata
+        state = unpickler.load()
+        return state
+
+    def setGhostState(self, object, pickle):
+        state = self.getState(pickle)
+        object.__setstate__(state)
+
+    def getObject(self, pickle):
+        unpickler = self._get_unpickler(pickle)
+        module, classname, newargs = unpickler.load()
+        obj = self._new_object(module, classname, newargs)
+        state = unpickler.load()
+        obj.__setstate__(state)
+        return obj
+
+class SimpleObjectReader(BaseObjectReader):
+    """Minimal reader for a single data record."""
+
+    def _persistent_load(self, oid):
+        return None
+
+class ConnectionObjectReader(BaseObjectReader):
+
+    def __init__(self, conn, cache):
+        self._conn = conn
+        self._cache = cache
+
+    def _persistent_load(self, oid):
+        # persistent_load function to pass to ObjectReader
+        if isinstance(oid, TupleType):
+            # XXX We get here via new_persistent_id()
+
+            # Quick instance reference.  We know all we need to know
+            # to create the instance w/o hitting the db, so go for it!
+            oid, classmeta = oid
+            obj = self._cache.get(oid)
+            if obj is not None:
+                return obj
+
+            obj = self._new_object(*classmeta)
+
+            # XXX should be done by connection
+            obj._p_oid = oid
+            obj._p_jar = self._conn
+            obj._p_changed = None
+
+            self._cache[oid] = obj
+            return obj
+
+        obj = self._cache.get(oid)
+        if obj is not None:
+            return obj
+        return self._conn[oid]
+
+class CopyReference:
+    def __init__(self, ref):
+        self.ref = ref
+
+class CopyObjectReader(BaseObjectReader):
+
+    def __init__(self, storage, created, oids):
+        self._storage = storage
+        self._created = created
+        self._cache = oids
+
+    def _persistent_load(self, oid):
+        if isinstance(oid, TupleType):
+            oid, classmeta = oid
+        else:
+            classmeta = None
+        new_ref = self._cache.get(oid)
+        if new_ref is None:
+            new_oid = self._storage.new_oid()
+            self._created.add(new_oid)
+            self._cache[oid] = new_ref = new_oid, classmeta
+        return CopyReference(new_ref)
+
+    def readPickle(self, pickle):
+        unpickler = self._get_unpickler(pickle)
+        classmeta = unpickler.load()
+        state = unpickler.load()
+        return classmeta, state
+
+class CopyObjectWriter(ObjectWriter):
+
+    def _persistent_id(self, obj):
+        if isinstance(obj, CopyReference):
+            return obj.ref
+        else:
+            return super(CopyObjectWriter, self)._persistent_id(obj)
+
+class ObjectCopier:
+
+    def __init__(self, jar, storage, created):
+        self.oids = {}
+        self._reader = CopyObjectReader(storage, created, self.oids)
+        self._writer = CopyObjectWriter(jar)
+
+    def copy(self, pickle):
+        classmeta, state = self._reader.readPickle(pickle)
+        return self._writer._dump(classmeta, state)
+
+def findrefs(p):
+    f = StringIO(p)
+    u = cPickle.Unpickler(f)
+    u.persistent_load = L = []
+    u.noload()
+    try:
+        u.noload()
+    except EOFError, err:
+        logging.warn("zodb: Bad pickled: %s", err)
+    # Iterate over L and convert persistent references to simple oids.
+    oids = []
+    for ref in L:
+        if isinstance(ref, TupleType):
+            oids.append(ref[0])
+        else:
+            oids.append(ref)
+    return oids


=== Zope3/src/zodb/timestamp.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:48 2002
+++ Zope3/src/zodb/timestamp.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,26 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+import time
+
+from _timestamp import TimeStamp
+
+def newTimeStamp(prev=None):
+    ts = timeStampFromTime(time.time())
+    if prev is not None:
+        ts = ts.laterThan(prev)
+    return ts
+
+def timeStampFromTime(t):
+    args = time.gmtime(t)[:5] + (t % 60,)
+    return TimeStamp(*args)


=== Zope3/src/zodb/utils.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:48 2002
+++ Zope3/src/zodb/utils.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,54 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+from zodb.timestamp import TimeStamp
+import struct
+import time
+
+z64 = "\0" * 8
+
+def p64(v):
+    """Pack an integer or long into a 8-byte string"""
+    return struct.pack(">Q", v)
+
+def u64(v):
+    """Unpack an 8-byte string into a 64-bit long integer."""
+    return struct.unpack(">Q", v)[0]
+
+def cp(f1, f2, l):
+    read = f1.read
+    write = f2.write
+    n = 8192
+
+    while l > 0:
+        if n > l:
+            n = l
+        d = read(n)
+        if not d:
+            break
+        write(d)
+        l = l - len(d)
+
+try:
+    from sets import Set
+except ImportError:
+    # This must be Python 2.2, which doesn't have a standard sets module.
+    # ZODB needs only a very limited subset of the Set API.
+    class Set(dict):
+        def add(self, o):
+            self[o] = 1
+        def __ior__(self, other):
+            if not isinstance(other, Set):
+                return NotImplemented
+            self.update(other)
+            return self


=== Zope3/src/zodb/winlock.c 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:48 2002
+++ Zope3/src/zodb/winlock.c	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,81 @@
+/*****************************************************************************
+
+  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+  All Rights Reserved.
+  
+  This software is subject to the provisions of the Zope Public License,
+  Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+  FOR A PARTICULAR PURPOSE.
+  
+ ****************************************************************************/
+static char winlock_doc_string[] = 
+"Lock files on Windows."
+"\n"
+"$Id$\n";
+
+#include "Python.h"
+
+static PyObject *Error;
+
+#ifdef MS_WIN32
+
+#include <windows.h>
+#include <io.h>
+
+static PyObject *	
+winlock(PyObject *ignored, PyObject *args)
+{
+  int fileno;
+  long h, ol, oh, ll, lh;
+  
+  if (! PyArg_ParseTuple(args, "illll", &fileno, &ol, &oh, &ll, &lh))
+    return NULL;
+
+  if ((h=_get_osfhandle(fileno))==-1) {
+    PyErr_SetString(Error, "_get_osfhandle failed");
+    return NULL;
+  }
+  if (LockFile((HANDLE)h, ol, oh, ll, lh)) {
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+  PyErr_SetObject(Error, PyInt_FromLong(GetLastError()));
+  return NULL;
+}
+
+static struct PyMethodDef methods[] = {
+  {"LockFile",	(PyCFunction)winlock,	1,
+   "LockFile(fileno, offsetLow, offsetHigh, lengthLow, lengthHigh ) -- "
+   "Lock the file associated with fileno"},
+  {NULL,		NULL}		/* sentinel */
+};
+#else
+
+static struct PyMethodDef methods[] = {
+  {NULL,		NULL}		/* sentinel */
+};
+
+#endif
+
+/* Initialization function for the module (*must* be called initcStringIO) */
+
+#ifndef DL_EXPORT	/* declarations for DLL import/export */
+#define DL_EXPORT(RTYPE) RTYPE
+#endif
+DL_EXPORT(void)
+initwinlock(void) {
+  PyObject *m, *d;
+
+  if (!(Error=PyString_FromString("winlock.error"))) 
+      return;
+
+  /* Create the module and add the functions */
+  m = Py_InitModule4("winlock", methods, winlock_doc_string, (PyObject*)NULL,
+		     PYTHON_API_VERSION);
+
+  d = PyModule_GetDict(m);
+  PyDict_SetItemString(d, "error", Error);
+}


=== Zope3/src/zodb/ztransaction.py 1.1 => 1.2 ===
--- /dev/null	Wed Dec 25 09:13:48 2002
+++ Zope3/src/zodb/ztransaction.py	Wed Dec 25 09:12:16 2002
@@ -0,0 +1,49 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+from transaction import set_factory
+from transaction.txn import Transaction as BaseTransaction
+
+from zodb.interfaces import ITransaction, ITransactionAttrs
+
+class Transaction(BaseTransaction):
+
+    __implements__ = ITransaction, ITransactionAttrs
+
+    user = ""
+    description = ""
+    _extension = None
+
+    def __init__(self, manager=None, parent=None,
+                 user=None, description=None):
+        super(Transaction, self).__init__(manager, parent)
+        if user is not None:
+            self.user = user
+        if description is not None:
+            self.description = description
+
+    def note(self, text):
+        if self.description:
+            self.description = "%s\n\n%s" % (self.description, text)
+        else:
+            self.description = text
+
+    def setUser(self, user_name, path='/'):
+        self.user = "%s %s" % (path, user_name)
+
+    def setExtendedInfo(self, name, value):
+        if self._extension is None:
+            self._extension = {}
+        self._extension[name] = value
+
+set_factory(Transaction)