[Zope-CVS] CVS: Products/AdaptableStorage/zodb - utils.py:1.1.2.1 ASConnection.py:1.12.2.1 ASStorage.py:1.9.2.1 OIDEncoder.py:1.4.2.1 RemainingState.py:1.2.2.1

Christian Zagrodnick cz@gocept.com
Mon, 13 Jan 2003 14:15:15 -0500


Update of /cvs-repository/Products/AdaptableStorage/zodb
In directory cvs.zope.org:/tmp/cvs-serv18927

Modified Files:
      Tag: zagy-patches
	ASConnection.py ASStorage.py OIDEncoder.py RemainingState.py 
Added Files:
      Tag: zagy-patches
	utils.py 
Log Message:
merging HEAD into zagy-patches branch


=== Added File Products/AdaptableStorage/zodb/utils.py ===
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utilities for handling ZODB objects.

$Id: utils.py,v 1.1.2.1 2003/01/13 19:15:12 zagy Exp $
"""

from cStringIO import StringIO
from cPickle import Pickler, Unpickler


def copyOf(object):
    """Copies a ZODB object, loading subobjects as needed.

    Pickles the object graph rooted at *object* (everything is pickled
    by value, since the persistent_id hook below always returns None),
    then unpickles the result, producing an independent deep copy.

    Re-ghostifies objects along the way to save memory.
    """
    # Subobjects that were ghosts (_p_changed is None) before pickling
    # reached them; they are returned to the ghost state as soon as the
    # pickler moves past them, so at most one stays loaded at a time.
    former_ghosts = []
    def persistent_id(ob, former_ghosts=former_ghosts):
        # Pickler hook, invoked for every object encountered during
        # dump().  It never emits a persistent reference (returns None);
        # it is used purely to observe and manage ghost state.
        if getattr(ob, '_p_changed', 0) is None:
            # 'ob' is currently a ghost.  NOTE(review): setting
            # _p_changed = 0 below activates it so the pickler can read
            # its state — standard ZODB Persistent behavior.
            # Load temporarily
            if former_ghosts:
                # First put the previously loaded subobject(s) back into
                # the ghost state, keeping the loaded set small.
                for g in former_ghosts:
                    g._p_changed = None
                del former_ghosts[:]
            former_ghosts.append(ob)
            ob._p_changed = 0
        return None
    stream = StringIO()
    p = Pickler(stream, 1)  # binary pickle protocol 1
    p.persistent_id = persistent_id
    p.dump(object)
    if former_ghosts:
        # Re-ghostify whatever is still loaded before unpickling, so the
        # copy is built without keeping extra object state in memory.
        for g in former_ghosts:
            g._p_changed = None
        del former_ghosts[:]
    stream.seek(0)
    u = Unpickler(stream)
    return u.load()



=== Products/AdaptableStorage/zodb/ASConnection.py 1.12 => 1.12.2.1 ===
--- Products/AdaptableStorage/zodb/ASConnection.py:1.12	Tue Dec 31 16:47:52 2002
+++ Products/AdaptableStorage/zodb/ASConnection.py	Mon Jan 13 14:15:12 2003
@@ -23,6 +23,7 @@
 from ZODB import Persistent
 from ZODB.Connection import Connection, StringIO, Unpickler, Pickler, \
      ConflictError, ReadConflictError, LOG, ERROR
+from Acquisition import aq_base
 
 from consts import SERIAL0, DEBUG
 from mapper_public import IKeyedObjectSystem, SerializationEvent, \
@@ -144,6 +145,7 @@
                 apply(getattr(self, method_name), (transaction,) + args, kw)
             return
         oid=object._p_oid
+        assert oid != 'unmanaged', repr(object)
         #invalid=self._invalidated.get
         invalid = self._invalid
         if oid is None or object._p_jar is not self:
@@ -189,6 +191,7 @@
             object=stack[-1]
             del stack[-1]
             oid=object._p_oid
+            assert oid != 'unmanaged', repr(object)
             serial=getattr(object, '_p_serial', '\0\0\0\0\0\0\0\0')
             if serial == '\0\0\0\0\0\0\0\0':
                 # new object
@@ -265,14 +268,24 @@
             dump(state)
             p=file(1)
             s=dbstore(oid,serial,p,version,transaction)
+
             # Put the object in the cache before handling the
             # response, just in case the response contains the
             # serial number for a newly created object
-            try: cache[oid]=object
+            try: cache[oid] = object
+            except ValueError:
+                # "Cannot re-register an object under a different
+                # oid".  This can happen when the user is working on
+                # the filesystem and creates an object with an ID that
+                # was used recently.  Try to fix it by minimizing
+                # the cache and trying again.
+                cache.minimize()
+                cache[oid] = object
             except:
-                # Dang, I bet its wrapped:
-                if hasattr(object, 'aq_base'):
-                    cache[oid]=object.aq_base
+                if aq_base(object) is not object:
+                    # Yuck, someone tried to store a wrapper.  Try to
+                    # cache it unwrapped.
+                    cache[oid] = aq_base(object)
                 else:
                     raise
 
@@ -393,6 +406,7 @@
         oid = object._p_oid
         if oid is None:
             return None
+        assert oid != 'unmanaged', repr(object)
         return self._db._oid_encoder.decode(oid)
 
     def newKey(self):


=== Products/AdaptableStorage/zodb/ASStorage.py 1.9 => 1.9.2.1 ===
--- Products/AdaptableStorage/zodb/ASStorage.py:1.9	Tue Dec 31 16:47:52 2002
+++ Products/AdaptableStorage/zodb/ASStorage.py	Mon Jan 13 14:15:12 2003
@@ -21,7 +21,9 @@
 from cStringIO import StringIO
 
 from ZODB import POSException, BaseStorage
-from mapper_public import MapperEvent, ITPCConnection
+from mapper_public \
+     import MapperEvent, LoadEvent, StoreEvent, \
+     ITPCConnection, NoStateFoundError
 
 from consts import SERIAL0, SERIAL1, DEBUG
 from OIDEncoder import OIDEncoder
@@ -90,14 +92,14 @@
             k = keychain[:i + 1]
             cfr = mapper.getClassifier()
             assert cfr is not None, keychain
-            event = MapperEvent(mapper, k)
+            event = LoadEvent(mapper, k)
             classification, sub_mapper_name = cfr.classifyState(event)
             mapper_names.append(sub_mapper_name)
             mapper = mapper.getSubMapper(sub_mapper_name)
-        event = MapperEvent(mapper, keychain)
+        event = LoadEvent(mapper, keychain)
         full_state, serial = mapper.getGateway().load(event)
         return full_state, serial, classification, mapper_names
-        
+
 
     def load(self, oid, version):
         if version:
@@ -138,12 +140,29 @@
             if DEBUG:
                 print 'storing', `oid`, `serial_hash`
             if serial_hash != SERIAL0:
+                # Overwriting an old object.  Use the serial to verify
+                # that the new data was derived from the old data.
                 info = self._load(root_mapper, keychain)
                 old_state, old_serial = info[:2]
                 old_serial_hash = self.hashSerial(old_serial)
                 if serial_hash != old_serial_hash:
-                    raise POSException.ConflictError("%r != %r" % (
-                        serial_hash, old_serial_hash))
+                    raise POSException.ConflictError(
+                        "Storing %s based on old data. %s != %s" % (
+                        repr(keychain),
+                        repr(serial_hash), repr(old_serial_hash)))
+            else:
+                # A new object.  Attempts to load should lead to
+                # NoStateFoundError or a serial of None, otherwise
+                # there's a conflict.
+                try:
+                    info = self._load(root_mapper, keychain)
+                except NoStateFoundError:
+                    pass
+                else:
+                    old_serial = info[1]
+                    if old_serial is not None:
+                        raise POSException.ConflictError(
+                            "%s already exists" % repr(keychain))
 
             # Now unpickle and store the data.
             file = StringIO(data)
@@ -156,7 +175,7 @@
             for mapper_name in mapper_names:
                 cfr = mapper.getClassifier()
                 mapper = mapper.getSubMapper(mapper_name)
-            event = MapperEvent(mapper, keychain)
+            event = StoreEvent(mapper, keychain)
             new_serial = mapper.getGateway().store(event, state)
             if cfr is not None:
                 cfr.store(event, classification)


=== Products/AdaptableStorage/zodb/OIDEncoder.py 1.4 => 1.4.2.1 ===
--- Products/AdaptableStorage/zodb/OIDEncoder.py:1.4	Sat Dec  7 00:59:14 2002
+++ Products/AdaptableStorage/zodb/OIDEncoder.py	Mon Jan 13 14:15:12 2003
@@ -33,7 +33,11 @@
         """Returns a keychain."""
         if oid == ROOT_OID:
             return ()
-        keychain = loads(oid)
+        try:
+            keychain = loads(oid)
+        except MemoryError:
+            print repr(oid)
+            raise
         assert isinstance(keychain, TupleType)
         return keychain
 


=== Products/AdaptableStorage/zodb/RemainingState.py 1.2 => 1.2.2.1 ===
--- Products/AdaptableStorage/zodb/RemainingState.py:1.2	Tue Dec 31 16:47:52 2002
+++ Products/AdaptableStorage/zodb/RemainingState.py	Mon Jan 13 14:15:12 2003
@@ -16,8 +16,9 @@
 $Id$
 """
 
+import os
 from cStringIO import StringIO
-from cPickle import Pickler, Unpickler
+from cPickle import Pickler, Unpickler, UnpickleableError
 from types import DictType
 
 from ZODB import Persistent
@@ -35,6 +36,12 @@
     def getSchema(self):
         return self.schema
 
+    def canSerialize(self, object):
+        try:
+            return isinstance(object, Persistent)
+        except TypeError:
+            # XXX Python 2.1 thinks Persistent is not a class
+            return 0
 
     def serialize(self, object, event):
         assert IFullSerializationEvent.isImplementedBy(event)
@@ -58,7 +65,7 @@
                           unmanaged=unmanaged):
             ref = getInternalRef(ob)
             if ref is None:
-                if isinstance(ob, Persistent):
+                if hasattr(ob, '_p_oid'):
                     # Persistent objects that end up in the remainder
                     # are unmanaged.  Tell ZODB about them so that
                     # ZODB can deal with them specially.
@@ -66,7 +73,40 @@
             return ref
 
         p.persistent_id = persistent_id
-        p.dump(state)
+        try:
+            p.dump(state)
+        except UnpickleableError, exc:
+            # Try to reveal which attribute is unpickleable.
+            attrname = None
+            attrvalue = None
+            for key, value in state.items():
+                del unmanaged[:]
+                outfile.seek(0)
+                outfile.truncate()
+                p = Pickler(outfile)
+                p.persistent_id = persistent_id
+                try:
+                    p.dump(value)
+                except UnpickleableError:
+                    attrname = key
+                    attrvalue = value
+                    break
+            if attrname is not None:
+                # Provide a more informative exception.
+                if os.environ.get('ZOPE_TRACE_UNPICKLEABLE'):
+                    # Provide an opportunity to examine
+                    # the "attrvalue" attribute.
+                    import pdb
+                    pdb.set_trace()
+                raise RuntimeError(
+                    'Unable to pickle the %s attribute, %s, '
+                    'of %s at %s.  %s.' % (
+                    repr(attrname), repr(attrvalue), repr(object),
+                    repr(event.getKeychain()), str(exc)))
+            else:
+                # Couldn't help.
+                raise
+
         p.dump(unmanaged)
         s = outfile.getvalue()
         event.addUnmanagedPersistentObjects(unmanaged)
@@ -85,7 +125,7 @@
             try:
                 unmanaged = u.load()
             except EOFError:
-                # old pickle
+                # old pickle with no list of unmanaged objects
                 pass
             else:
                 event.addUnmanagedPersistentObjects(unmanaged)