[Checkins] SVN: relstorage/trunk/relstorage/ History-free storage is born. :-)

Shane Hathaway shane at hathawaymix.org
Fri Sep 25 04:05:01 EDT 2009


Log message for revision 104516:
  History-free storage is born. :-)
  

Changed:
  U   relstorage/trunk/relstorage/adapters/mover.py
  U   relstorage/trunk/relstorage/adapters/packundo.py
  U   relstorage/trunk/relstorage/adapters/poller.py
  U   relstorage/trunk/relstorage/adapters/schema.py
  U   relstorage/trunk/relstorage/adapters/txncontrol.py
  U   relstorage/trunk/relstorage/relstorage.py
  U   relstorage/trunk/relstorage/tests/README.txt
  A   relstorage/trunk/relstorage/tests/RecoveryStorage.py
  A   relstorage/trunk/relstorage/tests/blob/
  A   relstorage/trunk/relstorage/tests/blob/__init__.py
  A   relstorage/trunk/relstorage/tests/blob/blob_connection.txt
  A   relstorage/trunk/relstorage/tests/blob/blob_importexport.txt
  A   relstorage/trunk/relstorage/tests/blob/blob_packing.txt
  A   relstorage/trunk/relstorage/tests/blob/blob_packing_history_free.txt
  A   relstorage/trunk/relstorage/tests/blob/blob_transaction.txt
  A   relstorage/trunk/relstorage/tests/blob/testblob.py
  A   relstorage/trunk/relstorage/tests/hftestbase.py
  A   relstorage/trunk/relstorage/tests/hptestbase.py
  U   relstorage/trunk/relstorage/tests/reltestbase.py
  U   relstorage/trunk/relstorage/tests/testmysql.py
  U   relstorage/trunk/relstorage/tests/testoracle.py
  U   relstorage/trunk/relstorage/tests/testpostgresql.py

-=-
Modified: relstorage/trunk/relstorage/adapters/mover.py
===================================================================
--- relstorage/trunk/relstorage/adapters/mover.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/adapters/mover.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -572,6 +572,12 @@
             cursor.execute(stmt, (oid, tid, oid, md5sum, encoded))
         else:
             stmt = """
+            DELETE FROM object_state
+            WHERE zoid = %s
+            """
+            cursor.execute(stmt, (oid,))
+
+            stmt = """
             INSERT INTO object_state (zoid, tid, state)
             VALUES (%s, %s, decode(%s, 'base64'))
             """
@@ -601,11 +607,18 @@
             """
             cursor.execute(stmt, (oid, tid, oid, md5sum, encoded))
         else:
-            stmt = """
-            INSERT INTO object_state (zoid, tid, state)
-            VALUES (%s, %s, %s)
-            """
-            cursor.execute(stmt, (oid, tid, encoded))
+            if not data:
+                stmt = """
+                DELETE FROM object_state
+                WHERE zoid = %s
+                """
+                cursor.execute(stmt, (oid,))
+            else:
+                stmt = """
+                REPLACE INTO object_state (zoid, tid, state)
+                VALUES (%s, %s, %s)
+                """
+                cursor.execute(stmt, (oid, tid, encoded))
 
     def oracle_restore(self, cursor, oid, tid, data):
         """Store an object directly, without conflict detection.
@@ -616,6 +629,11 @@
             md5sum = compute_md5sum(data)
         else:
             md5sum = None
+            stmt = """
+            DELETE FROM object_state
+            WHERE zoid = %s
+            """
+            cursor.execute(stmt, (oid,))
 
         if not data or len(data) <= 2000:
             # Send data inline for speed.  Oracle docs say maximum size
@@ -659,6 +677,7 @@
 
 
 
+
     def postgresql_detect_conflict(self, cursor):
         """Find one conflict in the data about to be committed.
 

Modified: relstorage/trunk/relstorage/adapters/packundo.py
===================================================================
--- relstorage/trunk/relstorage/adapters/packundo.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/adapters/packundo.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -104,7 +104,7 @@
         if not self.keep_history:
             stmt = """
             DELETE FROM object_ref
-            WHERE tid in (
+            WHERE zoid in (
                 SELECT zoid
                 FROM object_state
                 WHERE tid = %(tid)s
@@ -317,6 +317,10 @@
         # transaction to undo matches the object's current state.
         # If any object in the transaction does not fit that rule,
         # refuse to undo.
+        # (Note that this prevents conflict-resolving undo as described
+        # by ZODB.tests.ConflictResolution.ConflictResolvingTransUndoStorage.
+        # Do people need that? If so, we can probably support it, but it
+        # will require additional code.)
         stmt = """
         SELECT prev_os.zoid, current_object.tid
         FROM object_state prev_os
@@ -952,7 +956,6 @@
 
         Requires the information provided by _pre_gc.
         """
-
         # Read committed mode is sufficient.
         conn, cursor = self.connmanager.open()
         try:

Modified: relstorage/trunk/relstorage/adapters/poller.py
===================================================================
--- relstorage/trunk/relstorage/adapters/poller.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/adapters/poller.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -48,35 +48,39 @@
             return (), new_polled_tid
 
         if self.keep_history:
+            # If the previously polled transaction no longer exists,
+            # the cache is too old and needs to be cleared.
+            # XXX Do we actually need to detect this condition? I think
+            # if we delete this block of code, all the reachable objects
+            # will be invalidated anyway.
             stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
-        else:
-            stmt = "SELECT 1 FROM object_state WHERE tid <= %(tid)s LIMIT 1"
-        cursor.execute(intern(stmt % self.runner.script_vars),
-            {'tid': prev_polled_tid})
-        rows = cursor.fetchall()
-        if not rows:
-            # Transaction not found; perhaps it has been packed.
-            # The connection cache needs to be cleared.
-            return None, new_polled_tid
+            cursor.execute(intern(stmt % self.runner.script_vars),
+                {'tid': prev_polled_tid})
+            rows = cursor.fetchall()
+            if not rows:
+                # Transaction not found; perhaps it has been packed.
+                # The connection cache needs to be cleared.
+                return None, new_polled_tid
 
         # Get the list of changed OIDs and return it.
-        if ignore_tid is None:
+        if self.keep_history:
             stmt = """
             SELECT zoid
             FROM current_object
             WHERE tid > %(tid)s
             """
-            cursor.execute(intern(stmt % self.runner.script_vars),
-                {'tid': prev_polled_tid})
         else:
             stmt = """
             SELECT zoid
-            FROM current_object
+            FROM object_state
             WHERE tid > %(tid)s
-                AND tid != %(self_tid)s
             """
-            cursor.execute(intern(stmt % self.runner.script_vars),
-                {'tid': prev_polled_tid, 'self_tid': ignore_tid})
+        params = {'tid': prev_polled_tid}
+        if ignore_tid is not None:
+            stmt += " AND tid != %(self_tid)s"
+            params['self_tid'] = ignore_tid
+        stmt = intern(stmt % self.runner.script_vars)
+        cursor.execute(stmt, params)
         oids = [oid for (oid,) in cursor]
 
         return oids, new_polled_tid

Modified: relstorage/trunk/relstorage/adapters/schema.py
===================================================================
--- relstorage/trunk/relstorage/adapters/schema.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/adapters/schema.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -14,10 +14,10 @@
 """Database schema installers
 """
 from relstorage.adapters.interfaces import ISchemaInstaller
+from ZODB.POSException import StorageError
 from zope.interface import implements
 import time
 
-
 history_preserving_schema = """
 
 # commit_lock: Held during commit.  Another kind of lock is used for MySQL.
@@ -552,6 +552,7 @@
     def __init__(self, connmanager, runner, keep_history):
         self.connmanager = connmanager
         self.runner = runner
+        self.keep_history = keep_history
         if keep_history:
             self.schema_script = history_preserving_schema
             self.init_script = history_preserving_init
@@ -578,8 +579,26 @@
             tables = self.list_tables(cursor)
             if not 'object_state' in tables:
                 self.create(cursor)
+            else:
+                self.check_compatibility(cursor, tables)
         self.connmanager.open_and_call(callback)
 
+    def check_compatibility(self, cursor, tables):
+        if self.keep_history:
+            if 'transaction' not in tables and 'current_object' not in tables:
+                raise StorageError(
+                    "Schema mismatch: a history-preserving adapter "
+                    "can not connect to a history-free database. "
+                    "If you need to convert, use the zodbconvert utility."
+                    )
+        else:
+            if 'transaction' in tables and 'current_object' in tables:
+                raise StorageError(
+                    "Schema mismatch: a history-free adapter "
+                    "can not connect to a history-preserving database. "
+                    "If you need to convert, use the zodbconvert utility."
+                    )
+
     def zap_all(self):
         """Clear all data out of the database."""
         def callback(conn, cursor):

Modified: relstorage/trunk/relstorage/adapters/txncontrol.py
===================================================================
--- relstorage/trunk/relstorage/adapters/txncontrol.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/adapters/txncontrol.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -67,6 +67,7 @@
             ORDER BY tid DESC
             LIMIT 1
             """
+            cursor.execute(stmt)
         else:
             stmt = """
             SELECT tid, EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)
@@ -74,7 +75,12 @@
             ORDER BY tid DESC
             LIMIT 1
             """
-        cursor.execute(stmt)
+            cursor.execute(stmt)
+            if not cursor.rowcount:
+                # nothing has been stored yet
+                stmt = "SELECT 0, EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)"
+                cursor.execute(stmt)
+
         assert cursor.rowcount == 1
         return cursor.fetchone()
 
@@ -115,6 +121,7 @@
             LIMIT 1
             LOCK IN SHARE MODE
             """
+            cursor.execute(stmt)
         else:
             stmt = """
             SELECT tid, UNIX_TIMESTAMP()
@@ -123,7 +130,12 @@
             LIMIT 1
             LOCK IN SHARE MODE
             """
-        cursor.execute(stmt)
+            cursor.execute(stmt)
+            if not cursor.rowcount:
+                # nothing has been stored yet
+                stmt = "SELECT 0, UNIX_TIMESTAMP()"
+                cursor.execute(stmt)
+
         assert cursor.rowcount == 1
         tid, timestamp = cursor.fetchone()
         # MySQL does not provide timestamps with more than one second
@@ -189,6 +201,8 @@
                 '1970-01-01 00:00:00 +00:00','YYYY-MM-DD HH24:MI:SS TZH:TZM')))
             FROM transaction
             """
+            cursor.execute(stmt)
+            rows = list(cursor)
         else:
             stmt = """
             SELECT MAX(tid),
@@ -196,8 +210,21 @@
                 '1970-01-01 00:00:00 +00:00','YYYY-MM-DD HH24:MI:SS TZH:TZM')))
             FROM object_state
             """
-        cursor.execute(stmt)
-        tid, now = cursor.fetchone()
+            cursor.execute(stmt)
+            rows = list(cursor)
+            if not rows:
+                # nothing has been stored yet
+                stmt = """
+                SELECT 0,
+                TO_CHAR(TO_DSINTERVAL(SYSTIMESTAMP - TO_TIMESTAMP_TZ(
+                '1970-01-01 00:00:00 +00:00','YYYY-MM-DD HH24:MI:SS TZH:TZM')))
+                FROM DUAL
+                """
+                cursor.execute(stmt)
+                rows = list(cursor)
+
+        assert len(rows) == 1
+        tid, now = rows[0]
         return tid, self._parse_dsinterval(now)
 
     def add_transaction(self, cursor, tid, username, description, extension,

Modified: relstorage/trunk/relstorage/relstorage.py
===================================================================
--- relstorage/trunk/relstorage/relstorage.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/relstorage.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -737,6 +737,23 @@
                     # doesn't matter who won.
                     cache.incr('commit_count')
             self._ltid = self._tid
+
+            #if self._txn_blobs and not self._adapter.keep_history:
+                ## For each blob just committed, get the name of
+                ## one earlier revision (if any) and write the
+                ## name of the file to a log.  At pack time,
+                ## all the files in the log will be deleted and
+                ## the log will be cleared.
+                #for oid, filename in self._txn_blobs.iteritems():
+                    #dirname, current_name = os.path.split(filename)
+                    #names = os.listdir(dirname)
+                    #names.sort()
+                    #if current_name in names:
+                        #i = names.index(current_name)
+                        #if i > 0:
+                        #    to_delete = os.path.join(dirname, names[i-1])
+                        #    log.write('%s\n') % to_delete
+
         finally:
             self._txn_blobs = None
             self._prepared_txn = None
@@ -913,7 +930,7 @@
             self._lock_release()
 
     def _copy_undone_blobs(self, copied):
-        """After an undo operation, copies the matching blobs forward.
+        """After an undo operation, copy the matching blobs forward.
 
         The copied parameter is a list of (integer oid, integer tid).
         """
@@ -996,6 +1013,8 @@
             adapter.connmanager.close(lock_conn, lock_cursor)
         self.sync()
 
+        self._pack_finished()
+
     def _after_pack(self, oid_int, tid_int):
         """Called after an object state has been removed by packing.
 
@@ -1004,12 +1023,27 @@
         oid = p64(oid_int)
         tid = p64(tid_int)
         fn = self.fshelper.getBlobFilename(oid, tid)
-        if os.path.exists(fn):
-            ZODB.blob.remove_committed(fn)
+        if self._adapter.keep_history:
+            # remove only the revision just packed
+            if os.path.exists(fn):
+                ZODB.blob.remove_committed(fn)
+                dirname = os.path.dirname(fn)
+                if not os.listdir(dirname):
+                    ZODB.blob.remove_committed_dir(dirname)
+        else:
+            # remove all revisions
             dirname = os.path.dirname(fn)
-            if not os.listdir(dirname):
+            if os.path.exists(dirname):
+                for name in os.listdir(dirname):
+                    ZODB.blob.remove_committed(os.path.join(dirname, name))
                 ZODB.blob.remove_committed_dir(dirname)
 
+    def _pack_finished(self):
+        if self.fshelper is None or self._adapter.keep_history:
+            return
+
+        # Remove all old revisions of blobs.
+
     def iterator(self, start=None, stop=None):
         return TransactionIterator(self._adapter, start, stop)
 

Modified: relstorage/trunk/relstorage/tests/README.txt
===================================================================
--- relstorage/trunk/relstorage/tests/README.txt	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/tests/README.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -12,12 +12,16 @@
 CREATE USER relstoragetest WITH PASSWORD 'relstoragetest';
 CREATE DATABASE relstoragetest OWNER relstoragetest;
 CREATE DATABASE relstoragetest2 OWNER relstoragetest;
+CREATE DATABASE relstoragetest_hf OWNER relstoragetest;
+CREATE DATABASE relstoragetest2_hf OWNER relstoragetest;
 
 Also, add the following lines to the top of pg_hba.conf (if you put
 them at the bottom, they may be overridden by other parameters):
 
-local   relstoragetest  relstoragetest                md5
-local   relstoragetest2 relstoragetest                md5
+local   relstoragetest     relstoragetest   md5
+local   relstoragetest2    relstoragetest   md5
+local   relstoragetest_hf  relstoragetest   md5
+local   relstoragetest2_hf relstoragetest   md5
 
 
 MySQL
@@ -28,4 +32,8 @@
 GRANT ALL ON relstoragetest.* TO 'relstoragetest'@'localhost';
 CREATE DATABASE relstoragetest2;
 GRANT ALL ON relstoragetest2.* TO 'relstoragetest'@'localhost';
+CREATE DATABASE relstoragetest_hf;
+GRANT ALL ON relstoragetest_hf.* TO 'relstoragetest'@'localhost';
+CREATE DATABASE relstoragetest2_hf;
+GRANT ALL ON relstoragetest2_hf.* TO 'relstoragetest'@'localhost';
 FLUSH PRIVILEGES;

Added: relstorage/trunk/relstorage/tests/RecoveryStorage.py
===================================================================
--- relstorage/trunk/relstorage/tests/RecoveryStorage.py	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/RecoveryStorage.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,320 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""More recovery and iterator tests."""
+
+# This is copied from ZODB.tests.RecoveryStorage and expanded to fit
+# history-free storages.
+
+import itertools
+import transaction
+from transaction import Transaction
+from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle, snooze
+from ZODB import DB
+import ZODB.POSException
+from ZODB.serialize import referencesf
+import ZODB.blob
+
+import time
+
+
+class IteratorDeepCompare:
+
+    def compare(self, storage1, storage2):
+        # override this for storages that truncate on restore (because
+        # they do not store history).
+        self.compare_exact(storage1, storage2)
+
+    def compare_exact(self, storage1, storage2):
+        """Confirm that storage1 and storage2 contain equivalent data"""
+        eq = self.assertEqual
+        missing = object()
+        iter1 = storage1.iterator()
+        iter2 = storage2.iterator()
+        for txn1, txn2 in itertools.izip(iter1, iter2):
+            eq(txn1.tid,         txn2.tid)
+            eq(txn1.status,      txn2.status)
+            eq(txn1.user,        txn2.user)
+            eq(txn1.description, txn2.description)
+
+            # b/w compat on the 'extension' attribute
+            e1 = getattr(txn1, 'extension', missing)
+            if e1 is missing:
+                # old attribute name
+                e1 = txn1._extension
+            e2 = getattr(txn2, 'extension', missing)
+            if e2 is missing:
+                # old attribute name
+                e2 = txn2._extension
+            eq(e1, e2)
+
+            # compare the objects in the transaction, but disregard
+            # the order of the objects since that is not important.
+            recs1 = [(r.oid, r) for r in txn1]
+            recs1.sort()
+            recs2 = [(r.oid, r) for r in txn2]
+            recs2.sort()
+            eq(len(recs1), len(recs2))
+            for (oid1, rec1), (oid2, rec2) in itertools.izip(recs1, recs2):
+                eq(rec1.oid, rec2.oid)
+                eq(rec1.tid, rec2.tid)
+                eq(rec1.data, rec2.data)
+                if ZODB.blob.is_blob_record(rec1.data):
+                    try:
+                        fn1 = storage1.loadBlob(rec1.oid, rec1.tid)
+                    except ZODB.POSException.POSKeyError:
+                        self.assertRaises(
+                            ZODB.POSException.POSKeyError,
+                            storage2.loadBlob, rec1.oid, rec1.tid)
+                    else:
+                        fn2 = storage2.loadBlob(rec1.oid, rec1.tid)
+                        self.assert_(fn1 != fn2)
+                        eq(open(fn1, 'rb').read(), open(fn2, 'rb').read())
+
+        # Make sure ther are no more records left in txn1 and txn2, meaning
+        # they were the same length
+        self.assertRaises(IndexError, iter1.next)
+        self.assertRaises(IndexError, iter2.next)
+        self.assertRaises(StopIteration, iter1.next)
+        self.assertRaises(StopIteration, iter2.next)
+        iter1.close()
+        iter2.close()
+
+    def compare_truncated(self, src, dest):
+        """Confirm that dest is a truncated copy of src.
+
+        The copy process should have dropped all old revisions of objects
+        in src.  Also note that the dest does not retain transaction
+        metadata.
+        """
+        missing = object()
+        src_objects = {}  # {oid: (tid, data, blob or None)}
+        for txn in src.iterator():
+            for rec in txn:
+                if ZODB.blob.is_blob_record(rec.data):
+                    try:
+                        fn = src.loadBlob(rec.oid, rec.tid)
+                    except ZODB.POSException.POSKeyError:
+                        blob = None
+                    else:
+                        blob = open(fn, 'rb').read()
+                else:
+                    blob = None
+                src_objects[rec.oid] = (rec.tid, rec.data, blob)
+
+        unchecked = set(src_objects)
+        for txn in dest.iterator():
+            for rec in txn:
+                if ZODB.blob.is_blob_record(rec.data):
+                    try:
+                        fn = dest.loadBlob(rec.oid, rec.tid)
+                    except ZODB.POSException.POSKeyError:
+                        blob = None
+                    else:
+                        blob = open(fn, 'rb').read()
+                else:
+                    blob = None
+                dst_object = (rec.tid, rec.data, blob)
+                src_object = src_objects[rec.oid]
+                self.assertEqual(src_object, dst_object)
+                unchecked.remove(rec.oid)
+
+        self.assertEqual(len(unchecked), 0)
+
+
+class BasicRecoveryStorage(IteratorDeepCompare):
+
+    # Requires a setUp() that creates a self._dst destination storage
+    def checkSimpleRecovery(self):
+        oid = self._storage.new_oid()
+        revid = self._dostore(oid, data=11)
+        revid = self._dostore(oid, revid=revid, data=12)
+        revid = self._dostore(oid, revid=revid, data=13)
+        self._dst.copyTransactionsFrom(self._storage)
+        self.compare(self._storage, self._dst)
+
+    def checkPackWithGCOnDestinationAfterRestore(self):
+        raises = self.assertRaises
+        db = DB(self._storage)
+        conn = db.open()
+        root = conn.root()
+        root.obj = obj1 = MinPO(1)
+        txn = transaction.get()
+        txn.note('root -> obj')
+        txn.commit()
+        root.obj.obj = obj2 = MinPO(2)
+        txn = transaction.get()
+        txn.note('root -> obj -> obj')
+        txn.commit()
+        del root.obj
+        txn = transaction.get()
+        txn.note('root -X->')
+        txn.commit()
+        # Now copy the transactions to the destination
+        self._dst.copyTransactionsFrom(self._storage)
+        # If the source storage is a history-free storage, all
+        # of the transactions are now marked as packed in the
+        # destination storage.  To trigger a pack, we have to
+        # add another transaction to the destination that is
+        # not packed.
+        db2 = DB(self._dst)
+        conn2 = db2.open()
+        conn2.root().extra = 0
+        txn = transaction.get()
+        txn.note('root.extra = 0')
+        txn.commit()
+        # Now pack the destination.
+        snooze()
+        self._dst.pack(time.time(),  referencesf)
+        # And check to see that the root object exists, but not the other
+        # objects.
+        data, serial = self._dst.load(root._p_oid, '')
+        raises(KeyError, self._dst.load, obj1._p_oid, '')
+        raises(KeyError, self._dst.load, obj2._p_oid, '')
+
+
+class UndoableRecoveryStorage(BasicRecoveryStorage):
+    """These tests require the source storage to be undoable"""
+
+    def checkRestoreAcrossPack(self):
+        db = DB(self._storage)
+        c = db.open()
+        r = c.root()
+        obj = r["obj1"] = MinPO(1)
+        transaction.commit()
+        obj = r["obj2"] = MinPO(1)
+        transaction.commit()
+
+        self._dst.copyTransactionsFrom(self._storage)
+        self._dst.pack(time.time(), referencesf)
+
+        self._undo(self._storage.undoInfo()[0]['id'])
+
+        # copy the final transaction manually.  even though there
+        # was a pack, the restore() ought to succeed.
+        it = self._storage.iterator()
+        # Get the last transaction and its record iterator. Record iterators
+        # can't be accessed out-of-order, so we need to do this in a bit
+        # complicated way:
+        for final  in it:
+            records = list(final)
+
+        self._dst.tpc_begin(final, final.tid, final.status)
+        for r in records:
+            self._dst.restore(r.oid, r.tid, r.data, '', r.data_txn,
+                              final)
+        self._dst.tpc_vote(final)
+        self._dst.tpc_finish(final)
+
+    def checkRestoreWithMultipleObjectsInUndoRedo(self):
+        from ZODB.FileStorage import FileStorage
+
+        # Undo creates backpointers in (at least) FileStorage.  ZODB 3.2.1
+        # FileStorage._data_find() had an off-by-8 error, neglecting to
+        # account for the size of the backpointer when searching a
+        # transaction with multiple data records.  The results were
+        # unpredictable.  For example, it could raise a Python exception
+        # due to passing a negative offset to file.seek(), or could
+        # claim that a transaction didn't have data for an oid despite
+        # that it actually did.
+        #
+        # The former failure mode was seen in real life, in a ZRS secondary
+        # doing recovery.  On my box today, the second failure mode is
+        # what happens in this test (with an unpatched _data_find, of
+        # course).  Note that the error can only "bite" if more than one
+        # data record is in a transaction, and the oid we're looking for
+        # follows at least one data record with a backpointer.
+        #
+        # Unfortunately, _data_find() is a low-level implementation detail,
+        # and this test does some horrid white-box abuse to test it.
+
+        is_filestorage = isinstance(self._storage, FileStorage)
+
+        db = DB(self._storage)
+        c = db.open()
+        r = c.root()
+
+        # Create some objects.
+        r["obj1"] = MinPO(1)
+        r["obj2"] = MinPO(1)
+        transaction.commit()
+
+        # Add x attributes to them.
+        r["obj1"].x = 'x1'
+        r["obj2"].x = 'x2'
+        transaction.commit()
+
+        r = db.open().root()
+        self.assertEquals(r["obj1"].x, 'x1')
+        self.assertEquals(r["obj2"].x, 'x2')
+
+        # Dirty tricks.
+        if is_filestorage:
+            obj1_oid = r["obj1"]._p_oid
+            obj2_oid = r["obj2"]._p_oid
+            # This will be the offset of the next transaction, which
+            # will contain two backpointers.
+            pos = self._storage.getSize()
+
+        # Undo the attribute creation.
+        info = self._storage.undoInfo()
+        tid = info[0]['id']
+        t = Transaction()
+        self._storage.tpc_begin(t)
+        oids = self._storage.undo(tid, t)
+        self._storage.tpc_vote(t)
+        self._storage.tpc_finish(t)
+
+        r = db.open().root()
+        self.assertRaises(AttributeError, getattr, r["obj1"], 'x')
+        self.assertRaises(AttributeError, getattr, r["obj2"], 'x')
+
+        if is_filestorage:
+            # _data_find should find data records for both objects in that
+            # transaction.  Without the patch, the second assert failed
+            # (it claimed it couldn't find a data record for obj2) on my
+            # box, but other failure modes were possible.
+            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
+            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
+
+            # The offset of the next ("redo") transaction.
+            pos = self._storage.getSize()
+
+        # Undo the undo (restore the attributes).
+        info = self._storage.undoInfo()
+        tid = info[0]['id']
+        t = Transaction()
+        self._storage.tpc_begin(t)
+        oids = self._storage.undo(tid, t)
+        self._storage.tpc_vote(t)
+        self._storage.tpc_finish(t)
+
+        r = db.open().root()
+        self.assertEquals(r["obj1"].x, 'x1')
+        self.assertEquals(r["obj2"].x, 'x2')
+
+        if is_filestorage:
+            # Again _data_find should find both objects in this txn, and
+            # again the second assert failed on my box.
+            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
+            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
+
+        # Indirectly provoke .restore().  .restore in turn indirectly
+        # provokes _data_find too, but not usefully for the purposes of
+        # the specific bug this test aims at:  copyTransactionsFrom() uses
+        # storage iterators that chase backpointers themselves, and
+        # return the data they point at instead.  The result is that
+        # _data_find didn't actually see anything dangerous in this
+        # part of the test.
+        self._dst.copyTransactionsFrom(self._storage)
+        self.compare(self._storage, self._dst)

Added: relstorage/trunk/relstorage/tests/blob/__init__.py
===================================================================
--- relstorage/trunk/relstorage/tests/blob/__init__.py	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/__init__.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,4 @@
+"""This package contains test code copied from ZODB 3.9 with minor alterations.
+
+It is especially useful for testing RelStorage + ZODB 3.8.
+"""

Added: relstorage/trunk/relstorage/tests/blob/blob_connection.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_connection.txt	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/blob_connection.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,88 @@
+Connection support for Blobs tests
+==================================
+
+Connections handle Blobs specially. To demonstrate that, we first need a Blob
+with some data:
+
+    >>> from ZODB.interfaces import IBlob
+    >>> from ZODB.blob import Blob
+    >>> import transaction
+    >>> blob = Blob()
+    >>> data = blob.open("w")
+    >>> data.write("I'm a happy Blob.")
+    >>> data.close()
+
+We also need a database with a blob supporting storage.  (We're going to use
+FileStorage rather than MappingStorage here because we will want ``loadBefore``
+for one of our examples.)
+
+    >>> blob_storage = create_storage()
+    >>> from ZODB.DB import DB
+    >>> database = DB(blob_storage)
+
+Putting a Blob into a Connection works like every other object:
+
+    >>> connection = database.open()
+    >>> root = connection.root()
+    >>> root['myblob'] = blob
+    >>> transaction.commit()
+
+We can also commit a transaction that seats a blob into place without
+calling the blob's open method:
+
+    >>> nothing = transaction.begin()
+    >>> anotherblob = Blob()
+    >>> root['anotherblob'] = anotherblob
+    >>> nothing = transaction.commit()
+
+Getting stuff out of there works similarly:
+
+    >>> transaction2 = transaction.TransactionManager()
+    >>> connection2 = database.open(transaction_manager=transaction2)
+    >>> root = connection2.root()
+    >>> blob2 = root['myblob']
+    >>> IBlob.providedBy(blob2)
+    True
+    >>> blob2.open("r").read()
+    "I'm a happy Blob."
+    >>> transaction2.abort()
+
+MVCC also works.
+
+    >>> transaction3 = transaction.TransactionManager()
+    >>> connection3 = database.open(transaction_manager=transaction3)
+    >>> f = connection.root()['myblob'].open('w')
+    >>> f.write('I am an ecstatic Blob.')
+    >>> f.close()
+    >>> transaction.commit()
+    >>> connection3.root()['myblob'].open('r').read()
+    "I'm a happy Blob."
+
+    >>> transaction2.abort()
+    >>> transaction3.abort()
+    >>> connection2.close()
+    >>> connection3.close()
+
+You can't put blobs into a database that has uses a Non-Blob-Storage, though:
+
+    >>> from ZODB.MappingStorage import MappingStorage
+    >>> no_blob_storage = MappingStorage()
+    >>> database2 = DB(no_blob_storage)
+    >>> connection2 = database2.open(transaction_manager=transaction2)
+    >>> root = connection2.root()
+    >>> root['myblob'] = Blob()
+    >>> transaction2.commit()        # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+        ...
+    Unsupported: Storing Blobs in <ZODB.MappingStorage.MappingStorage object at ...> is not supported.
+
+    >>> transaction2.abort()
+    >>> connection2.close()
+
+After testing this, we don't need the storage directory and databases anymore:
+
+    >>> transaction.abort()
+    >>> connection.close()
+    >>> database.close()
+    >>> database2.close()
+    >>> blob_storage.close()

Added: relstorage/trunk/relstorage/tests/blob/blob_importexport.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_importexport.txt	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/blob_importexport.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,78 @@
+##############################################################################
+#
+# Copyright (c) 2005 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+
+Import/export support for blob data
+===================================
+
+Set up:
+
+    >>> import ZODB.blob, transaction
+    >>> from persistent.mapping import PersistentMapping
+
+We need an database with an undoing blob supporting storage:
+
+    >>> database1 = ZODB.DB(create_storage('1'))
+    >>> database2 = ZODB.DB(create_storage('2'))
+
+Create our root object for database1:
+
+    >>> connection1 = database1.open()
+    >>> root1 = connection1.root()
+
+Put a couple blob objects in our database1 and on the filesystem:
+
+    >>> import time, os
+    >>> nothing = transaction.begin()
+    >>> data1 = 'x'*100000
+    >>> blob1 = ZODB.blob.Blob()
+    >>> blob1.open('w').write(data1)
+    >>> data2 = 'y'*100000
+    >>> blob2 = ZODB.blob.Blob()
+    >>> blob2.open('w').write(data2)
+    >>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
+    >>> root1['blobdata'] = d
+    >>> transaction.commit()
+
+Export our blobs from a database1 connection:
+
+    >>> conn = root1['blobdata']._p_jar
+    >>> oid = root1['blobdata']._p_oid
+    >>> exportfile = 'export'
+    >>> nothing = connection1.exportFile(oid, exportfile)
+
+Import our exported data into database2:
+
+    >>> connection2 = database2.open()
+    >>> root2 = connection2.root()
+    >>> nothing = transaction.begin()
+    >>> data = root2._p_jar.importFile(exportfile)
+    >>> root2['blobdata'] = data
+    >>> transaction.commit()
+
+Make sure our data exists:
+
+    >>> items1 = root1['blobdata']
+    >>> items2 = root2['blobdata']
+    >>> bool(items1.keys() == items2.keys())
+    True
+    >>> items1['blob1'].open().read() == items2['blob1'].open().read()
+    True
+    >>> items1['blob2'].open().read() == items2['blob2'].open().read()
+    True
+    >>> transaction.get().abort()
+
+.. cleanup
+
+    >>> database1.close()
+    >>> database2.close()

Added: relstorage/trunk/relstorage/tests/blob/blob_packing.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_packing.txt	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/blob_packing.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,128 @@
+Packing support for blob data
+=============================
+
+Set up:
+
+    >>> from ZODB.serialize import referencesf
+    >>> from ZODB.blob import Blob
+    >>> from ZODB import utils
+    >>> from ZODB.DB import DB
+    >>> import transaction
+
+A helper method to assure a unique timestamp across multiple platforms:
+
+    >>> from relstorage.tests.blob.testblob import new_time
+
+UNDOING
+=======
+
+We need a database with an undoing blob supporting storage:
+
+    >>> blob_storage = create_storage()
+    >>> database = DB(blob_storage)
+
+Create our root object:
+
+    >>> connection1 = database.open()
+    >>> root = connection1.root()
+
+Put some revisions of a blob object in our database and on the filesystem:
+
+    >>> import os
+    >>> tids = []
+    >>> times = []
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> blob = Blob()
+    >>> blob.open('w').write('this is blob data 0')
+    >>> root['blob'] = blob
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 1')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 2')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 3')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 4')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> oid = root['blob']._p_oid
+    >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
+    >>> [ os.path.exists(x) for x in fns ]
+    [True, True, True, True, True]
+
+Do a pack to the slightly before the first revision was written:
+
+    >>> packtime = times[0]
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [True, True, True, True, True]
+    
+Do a pack to the slightly before the second revision was written:
+
+    >>> packtime = times[1]
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [True, True, True, True, True]
+
+Do a pack to the slightly before the third revision was written:
+
+    >>> packtime = times[2]
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, True, True, True, True]
+
+Do a pack to the slightly before the fourth revision was written:
+
+    >>> packtime = times[3]
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, False, True, True, True]
+
+Do a pack to the slightly before the fifth revision was written:
+
+    >>> packtime = times[4]
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, False, False, True, True]
+
+Do a pack to now:
+
+    >>> packtime = new_time()
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, False, False, False, True]
+
+Delete the object and do a pack, it should get rid of the most current
+revision as well as the entire directory:
+
+    >>> nothing = transaction.begin()
+    >>> del root['blob']
+    >>> transaction.commit()
+    >>> packtime = new_time()
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, False, False, False, False]
+    >>> os.path.exists(os.path.split(fns[0])[0])
+    False
+
+Clean up our blob directory and database:
+
+    >>> blob_storage.close()

Added: relstorage/trunk/relstorage/tests/blob/blob_packing_history_free.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_packing_history_free.txt	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/blob_packing_history_free.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,132 @@
+Packing support for blob data
+=============================
+
+Set up:
+
+    >>> from ZODB.serialize import referencesf
+    >>> from ZODB.blob import Blob
+    >>> from ZODB import utils
+    >>> from ZODB.DB import DB
+    >>> import transaction
+
+A helper method to assure a unique timestamp across multiple platforms:
+
+    >>> from relstorage.tests.blob.testblob import new_time
+
+UNDOING
+=======
+
+We need a database with an undoing blob supporting storage:
+
+    >>> blob_storage = create_storage()
+    >>> database = DB(blob_storage)
+
+Create our root object:
+
+    >>> connection1 = database.open()
+    >>> root = connection1.root()
+
+Put some revisions of a blob object in our database and on the filesystem:
+
+    >>> import os
+    >>> tids = []
+    >>> times = []
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> blob = Blob()
+    >>> blob.open('w').write('this is blob data 0')
+    >>> root['blob'] = blob
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 1')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 2')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 3')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> nothing = transaction.begin()
+    >>> times.append(new_time())
+    >>> root['blob'].open('w').write('this is blob data 4')
+    >>> transaction.commit()
+    >>> tids.append(blob._p_serial)
+
+    >>> oid = root['blob']._p_oid
+    >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
+    >>> [ os.path.exists(x) for x in fns ]
+    [True, True, True, True, True]
+
+
+NOTE: The following tests are temporarily disabled until the
+implementation matches the behavior described.
+
+#Do a pack to the slightly before the first revision was written:
+
+    #>>> packtime = times[0]
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+#Do a pack to the slightly before the second revision was written:
+
+    #>>> packtime = times[1]
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+#Do a pack to the slightly before the third revision was written:
+
+    #>>> packtime = times[2]
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+#Do a pack to the slightly before the fourth revision was written:
+
+    #>>> packtime = times[3]
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+#Do a pack to the slightly before the fifth revision was written:
+
+    #>>> packtime = times[4]
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+#Do a pack to now:
+
+    #>>> packtime = new_time()
+    #>>> blob_storage.pack(packtime, referencesf)
+    #>>> [ os.path.exists(x) for x in fns ]
+    #[False, False, False, False, True]
+
+Delete the object and do a pack, it should get rid of the most current
+revision as well as the entire directory:
+
+    >>> nothing = transaction.begin()
+    >>> del root['blob']
+    >>> transaction.commit()
+    >>> packtime = new_time()
+    >>> blob_storage.pack(packtime, referencesf)
+    >>> [ os.path.exists(x) for x in fns ]
+    [False, False, False, False, False]
+    >>> os.path.exists(os.path.split(fns[0])[0])
+    False
+
+Clean up our blob directory and database:
+
+    >>> blob_storage.close()

Added: relstorage/trunk/relstorage/tests/blob/blob_transaction.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_transaction.txt	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/blob_transaction.txt	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,420 @@
+##############################################################################
+#
+# Copyright (c) 2005-2007 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+
+Transaction support for Blobs
+=============================
+
+We need a database with a blob supporting storage::
+
+    >>> import ZODB.blob, transaction
+    >>> blob_dir = 'blobs'
+    >>> blob_storage = create_storage(blob_dir=blob_dir)
+    >>> database = ZODB.DB(blob_storage)
+    >>> connection1 = database.open()
+    >>> root1 = connection1.root()
+
+Putting a Blob into a Connection works like any other Persistent object::
+
+    >>> blob1 = ZODB.blob.Blob()
+    >>> blob1.open('w').write('this is blob 1')
+    >>> root1['blob1'] = blob1
+    >>> 'blob1' in root1
+    True
+
+Aborting a blob add leaves the blob unchanged:
+
+    >>> transaction.abort()
+    >>> 'blob1' in root1
+    False
+
+    >>> blob1._p_oid
+    >>> blob1._p_jar
+    >>> blob1.open().read()
+    'this is blob 1'
+
+It doesn't clear the file because there is no previously committed version: 
+
+    >>> fname = blob1._p_blob_uncommitted
+    >>> import os
+    >>> os.path.exists(fname)
+    True
+
+Let's put the blob back into the root and commit the change:
+
+    >>> root1['blob1'] = blob1
+    >>> transaction.commit()
+
+Now, if we make a change and abort it, we'll return to the committed
+state:
+
+    >>> os.path.exists(fname)
+    False
+    >>> blob1._p_blob_uncommitted
+
+    >>> blob1.open('w').write('this is new blob 1')
+    >>> blob1.open().read()
+    'this is new blob 1'
+    >>> fname = blob1._p_blob_uncommitted
+    >>> os.path.exists(fname)
+    True
+
+    >>> transaction.abort()
+    >>> os.path.exists(fname)
+    False
+    >>> blob1._p_blob_uncommitted
+
+    >>> blob1.open().read()
+    'this is blob 1'
+
+Opening a blob gives us a filehandle.  Getting data out of the
+resulting filehandle is accomplished via the filehandle's read method::
+
+    >>> connection2 = database.open()
+    >>> root2 = connection2.root()
+    >>> blob1a = root2['blob1']
+
+    >>> blob1afh1 = blob1a.open("r")
+    >>> blob1afh1.read()
+    'this is blob 1'
+
+Let's make another filehandle for read only to blob1a. Aach file
+handle has a reference to the (same) underlying blob::
+
+    >>> blob1afh2 = blob1a.open("r")
+    >>> blob1afh2.blob is blob1afh1.blob
+    True
+
+Let's close the first filehandle we got from the blob::
+
+    >>> blob1afh1.close()
+
+Let's abort this transaction, and ensure that the filehandles that we
+opened are still open::
+
+    >>> transaction.abort()
+    >>> blob1afh2.read()
+    'this is blob 1'
+
+    >>> blob1afh2.close()
+
+If we open a blob for append, writing any number of bytes to the
+blobfile should result in the blob being marked "dirty" in the
+connection (we just aborted above, so the object should be "clean"
+when we start)::
+
+    >>> bool(blob1a._p_changed)
+    False
+    >>> blob1a.open('r').read()
+    'this is blob 1'
+    >>> blob1afh3 = blob1a.open('a')
+    >>> bool(blob1a._p_changed)
+    True
+    >>> blob1afh3.write('woot!')
+    >>> blob1afh3.close()
+
+We can open more than one blob object during the course of a single
+transaction::
+
+    >>> blob2 = ZODB.blob.Blob()
+    >>> blob2.open('w').write('this is blob 3')
+    >>> root2['blob2'] = blob2
+    >>> transaction.commit()
+
+Since we committed the current transaction above, the aggregate
+changes we've made to blob, blob1a (these refer to the same object) and
+blob2 (a different object) should be evident::
+
+    >>> blob1.open('r').read()
+    'this is blob 1woot!'
+    >>> blob1a.open('r').read()
+    'this is blob 1woot!'
+    >>> blob2.open('r').read()
+    'this is blob 3'
+
+We shouldn't be able to persist a blob filehandle at commit time
+(although the exception which is raised when an object cannot be
+pickled appears to be particulary unhelpful for casual users at the
+moment)::
+
+    >>> root1['wontwork'] = blob1.open('r')
+    >>> transaction.commit()
+    Traceback (most recent call last):
+        ...
+    TypeError: coercing to Unicode: need string or buffer, BlobFile found
+
+Abort for good measure::
+
+    >>> transaction.abort()
+
+Attempting to change a blob simultaneously from two different
+connections should result in a write conflict error::
+
+    >>> tm1 = transaction.TransactionManager()
+    >>> tm2 = transaction.TransactionManager()
+    >>> root3 = database.open(transaction_manager=tm1).root()
+    >>> root4 = database.open(transaction_manager=tm2).root()
+    >>> blob1c3 = root3['blob1']
+    >>> blob1c4 = root4['blob1']
+    >>> blob1c3fh1 = blob1c3.open('a').write('this is from connection 3')
+    >>> blob1c4fh1 = blob1c4.open('a').write('this is from connection 4')
+    >>> tm1.commit()
+    >>> root3['blob1'].open('r').read()
+    'this is blob 1woot!this is from connection 3'
+    >>> tm2.commit()
+    Traceback (most recent call last):
+        ...
+    ConflictError: database conflict error (oid 0x01, class ZODB.blob.Blob...)
+
+After the conflict, the winning transaction's result is visible on both
+connections::
+
+    >>> root3['blob1'].open('r').read()
+    'this is blob 1woot!this is from connection 3'
+    >>> tm2.abort()
+    >>> root4['blob1'].open('r').read()
+    'this is blob 1woot!this is from connection 3'
+
+You can't commit a transaction while blob files are open:
+
+    >>> f = root3['blob1'].open('w')
+    >>> tm1.commit()
+    Traceback (most recent call last):
+    ...
+    ValueError: Can't commit with opened blobs.
+
+    >>> f.close()
+    >>> tm1.abort()
+    >>> f = root3['blob1'].open('w')
+    >>> f.close()
+
+    >>> f = root3['blob1'].open('r')
+    >>> tm1.commit()
+    Traceback (most recent call last):
+    ...
+    ValueError: Can't commit with opened blobs.
+    >>> f.close()
+    >>> tm1.abort()
+
+Savepoints and Blobs
+--------------------
+
+We do support optimistic savepoints:
+
+    >>> connection5 = database.open()
+    >>> root5 = connection5.root()
+    >>> blob = ZODB.blob.Blob()
+    >>> blob_fh = blob.open("w")
+    >>> blob_fh.write("I'm a happy blob.")
+    >>> blob_fh.close()
+    >>> root5['blob'] = blob
+    >>> transaction.commit()
+    >>> root5['blob'].open("r").read()
+    "I'm a happy blob."
+    >>> blob_fh = root5['blob'].open("a")
+    >>> blob_fh.write(" And I'm singing.")
+    >>> blob_fh.close()
+    >>> root5['blob'].open("r").read()
+    "I'm a happy blob. And I'm singing."
+    >>> savepoint = transaction.savepoint(optimistic=True)
+
+    >>> root5['blob'].open("r").read()
+    "I'm a happy blob. And I'm singing."
+
+Savepoints store the blobs in temporary directories in the temporary
+directory of the blob storage:
+
+    >>> len([name for name in os.listdir(os.path.join(blob_dir, 'tmp'))
+    ...      if name.startswith('savepoint')])
+    1
+
+After committing the transaction, the temporary savepoint files are moved to
+the committed location again:
+
+    >>> transaction.commit()
+    >>> len([name for name in os.listdir(os.path.join(blob_dir, 'tmp'))
+    ...      if name.startswith('savepoint')])
+    0
+
+We support non-optimistic savepoints too:
+
+    >>> root5['blob'].open("a").write(" And I'm dancing.")
+    >>> root5['blob'].open("r").read()
+    "I'm a happy blob. And I'm singing. And I'm dancing."
+    >>> savepoint = transaction.savepoint()
+
+Again, the savepoint creates a new savepoints directory:
+
+    >>> len([name for name in os.listdir(os.path.join(blob_dir, 'tmp'))
+    ...      if name.startswith('savepoint')])
+    1
+
+    >>> root5['blob'].open("w").write(" And the weather is beautiful.")
+    >>> savepoint.rollback()
+
+    >>> root5['blob'].open("r").read()
+    "I'm a happy blob. And I'm singing. And I'm dancing."
+    >>> transaction.abort()
+
+The savepoint blob directory gets cleaned up on an abort:
+
+    >>> len([name for name in os.listdir(os.path.join(blob_dir, 'tmp'))
+    ...      if name.startswith('savepoint')])
+    0
+
+Reading Blobs outside of a transaction
+--------------------------------------
+
+If you want to read from a Blob outside of transaction boundaries (e.g. to
+stream a file to the browser), committed method to get the name of a
+file that can be opened.
+
+    >>> connection6 = database.open()
+    >>> root6 = connection6.root()
+    >>> blob = ZODB.blob.Blob()
+    >>> blob_fh = blob.open("w")
+    >>> blob_fh.write("I'm a happy blob.")
+    >>> blob_fh.close()
+    >>> root6['blob'] = blob
+    >>> transaction.commit()
+    >>> open(blob.committed()).read()
+    "I'm a happy blob."
+
+We can also read committed data by calling open with a 'c' flag:
+
+    >>> f = blob.open('c')
+
+This just returns a regular file object:
+
+    >>> type(f)
+    <type 'file'>
+
+and doesn't prevent us from opening the blob for writing:
+
+    >>> blob.open('w').write('x')
+    >>> blob.open().read()
+    'x'
+
+    >>> f.read()
+    "I'm a happy blob."
+
+    >>> f.close()
+    >>> transaction.abort()
+
+An exception is raised if we call committed on a blob that has
+uncommitted changes:
+
+    >>> blob = ZODB.blob.Blob()
+    >>> blob.committed()
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> blob.open('c')
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> blob.open('w').write("I'm a happy blob.")
+    >>> root6['blob6'] = blob
+    >>> blob.committed()
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> blob.open('c')
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> s = transaction.savepoint()
+    >>> blob.committed()
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> blob.open('c')
+    Traceback (most recent call last):
+    ...
+    BlobError: Uncommitted changes
+
+    >>> transaction.commit()
+    >>> open(blob.committed()).read()
+    "I'm a happy blob."
+
+You can't open a committed blob file for writing:
+
+    >>> open(blob.committed(), 'w') # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ...
+    IOError: ...
+
+tpc_abort
+---------
+
+If a transaction is aborted in the middle of 2-phase commit, any data
+stored are discarded.
+
+    >>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
+    >>> t = transaction.get()
+    >>> blob_storage.tpc_begin(t)
+    >>> open('blobfile', 'w').write('This data should go away')
+    >>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
+    ...                             '', t)
+    >>> new_oid = blob_storage.new_oid()
+    >>> open('blobfile2', 'w').write('This data should go away too')
+    >>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
+    ...                             '', t)
+
+    >>> serials = blob_storage.tpc_vote(t)
+    >>> if s1 is None:
+    ...     s1 = [s for (oid, s) in serials if oid == blob._p_oid][0]
+    >>> if s2 is None:
+    ...     s2 = [s for (oid, s) in serials if oid == new_oid][0]
+
+    >>> blob_storage.tpc_abort(t)
+
+Now, the serial for the existing blob should be the same:
+
+    >>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial)
+    True
+
+And we shouldn't be able to read the data that we saved:
+
+    >>> blob_storage.loadBlob(blob._p_oid, s1)
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 'No blob file'
+
+Of course the old data should be unaffected:
+
+    >>> open(blob_storage.loadBlob(blob._p_oid, oldserial)).read()
+    "I'm a happy blob."
+
+Similarly, the new object wasn't added to the storage:
+
+    >>> blob_storage.load(new_oid, '')
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 0x...
+
+    >>> blob_storage.loadBlob(blob._p_oid, s2)
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 'No blob file'
+
+.. clean up
+
+    >>> tm1.abort()
+    >>> tm2.abort()
+    >>> database.close()

Added: relstorage/trunk/relstorage/tests/blob/testblob.py
===================================================================
--- relstorage/trunk/relstorage/tests/blob/testblob.py	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/blob/testblob.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,517 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+
+from ZODB.blob import Blob
+from ZODB.DB import DB
+from zope.testing import doctest
+
+import os
+import random
+import re
+import struct
+import sys
+import time
+import transaction
+import unittest
+import ZODB.blob
+import ZODB.interfaces
+from relstorage.tests.RecoveryStorage import IteratorDeepCompare
+import ZODB.tests.StorageTestBase
+import ZODB.tests.util
+import zope.testing.renormalizing
+
+def new_time():
+    """Create a _new_ time stamp.
+
+    This method also makes sure that after retrieving a timestamp that was
+    *before* a transaction was committed, that at least one second passes so
+    the packing time actually is before the commit time.
+
+    """
+    now = new_time = time.time()
+    while new_time <= now:
+        new_time = time.time()
+    time.sleep(1)
+    return new_time
+
+
+class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
+
+    def setUp(self):
+        ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
+        self._storage = self.create_storage()
+
+
+class BlobUndoTests(BlobTestBase):
+
+    def testUndoWithoutPreviousVersion(self):
+        database = DB(self._storage)
+        connection = database.open()
+        root = connection.root()
+        transaction.begin()
+        root['blob'] = Blob()
+        transaction.commit()
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        # the blob footprint object should exist no longer
+        self.assertRaises(KeyError, root.__getitem__, 'blob')
+        database.close()
+        
+    def testUndo(self):
+        database = DB(self._storage)
+        connection = database.open()
+        root = connection.root()
+        transaction.begin()
+        blob = Blob()
+        blob.open('w').write('this is state 1')
+        root['blob'] = blob
+        transaction.commit()
+
+        transaction.begin()
+        blob = root['blob']
+        blob.open('w').write('this is state 2')
+        transaction.commit()
+
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+        self.assertEqual(blob.open('r').read(), 'this is state 1')
+
+        database.close()
+
+    def testUndoAfterConsumption(self):
+        database = DB(self._storage)
+        connection = database.open()
+        root = connection.root()
+        transaction.begin()
+        open('consume1', 'w').write('this is state 1')
+        blob = Blob()
+        blob.consumeFile('consume1')
+        root['blob'] = blob
+        transaction.commit()
+        
+        transaction.begin()
+        blob = root['blob']
+        open('consume2', 'w').write('this is state 2')
+        blob.consumeFile('consume2')
+        transaction.commit()
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        self.assertEqual(blob.open('r').read(), 'this is state 1')
+
+        database.close()
+
+    def testRedo(self):
+        database = DB(self._storage)
+        connection = database.open()
+        root = connection.root()
+        blob = Blob()
+
+        transaction.begin()
+        blob.open('w').write('this is state 1')
+        root['blob'] = blob
+        transaction.commit()
+
+        transaction.begin()
+        blob = root['blob']
+        blob.open('w').write('this is state 2')
+        transaction.commit()
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        self.assertEqual(blob.open('r').read(), 'this is state 1')
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        self.assertEqual(blob.open('r').read(), 'this is state 2')
+
+        database.close()
+
+    def testRedoOfCreation(self):
+        database = DB(self._storage)
+        connection = database.open()
+        root = connection.root()
+        blob = Blob()
+
+        transaction.begin()
+        blob.open('w').write('this is state 1')
+        root['blob'] = blob
+        transaction.commit()
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        self.assertRaises(KeyError, root.__getitem__, 'blob')
+
+        database.undo(database.undoLog(0, 1)[0]['id'])
+        transaction.commit()
+
+        self.assertEqual(blob.open('r').read(), 'this is state 1')
+
+        database.close()
+
+
+class RecoveryBlobStorage(BlobTestBase,
+                          IteratorDeepCompare):
+
+    def setUp(self):
+        BlobTestBase.setUp(self)
+        self._dst = self.create_storage('dest')
+
+    def tearDown(self):
+        self._dst.close()
+        BlobTestBase.tearDown(self)
+
+    # Requires a setUp() that creates a self._dst destination storage
+    def testSimpleBlobRecovery(self):
+        self.assert_(
+            ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage)
+            )
+        db = DB(self._storage)
+        conn = db.open()
+        conn.root()[1] = ZODB.blob.Blob()
+        transaction.commit()
+        conn.root()[2] = ZODB.blob.Blob()
+        conn.root()[2].open('w').write('some data')
+        transaction.commit()
+        conn.root()[3] = ZODB.blob.Blob()
+        conn.root()[3].open('w').write(
+            (''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
+                     for i in range(random.randint(10000,20000)))
+             )[:-random.randint(1,4)]
+            )
+        transaction.commit()
+        conn.root()[2] = ZODB.blob.Blob()
+        conn.root()[2].open('w').write('some other data')
+        transaction.commit()
+        self._dst.copyTransactionsFrom(self._storage)
+        self.compare(self._storage, self._dst)
+    
+
+def packing_with_uncommitted_data_non_undoing():
+    """
+    This covers regression for bug #130459.
+
+    When uncommitted data exists it formerly was written to the root of the
+    blob_directory and confused our packing strategy. We now use a separate
+    temporary directory that is ignored while packing.
+
+    >>> import transaction
+    >>> from ZODB.DB import DB
+    >>> from ZODB.serialize import referencesf
+
+    >>> blob_storage = create_storage()
+    >>> database = DB(blob_storage)
+    >>> connection = database.open()
+    >>> root = connection.root()
+    >>> from ZODB.blob import Blob
+    >>> root['blob'] = Blob()
+    >>> connection.add(root['blob'])
+    >>> root['blob'].open('w').write('test')
+
+    >>> blob_storage.pack(new_time(), referencesf)
+
+    Clean up:
+
+    >>> database.close()
+
+    """
+
+def packing_with_uncommitted_data_undoing():
+    """
+    This covers regression for bug #130459.
+
+    When uncommitted data exists it formerly was written to the root of the
+    blob_directory and confused our packing strategy. We now use a separate
+    temporary directory that is ignored while packing.
+
+    >>> from ZODB.serialize import referencesf
+
+    >>> blob_storage = create_storage()
+    >>> database = DB(blob_storage)
+    >>> connection = database.open()
+    >>> root = connection.root()
+    >>> from ZODB.blob import Blob
+    >>> root['blob'] = Blob()
+    >>> connection.add(root['blob'])
+    >>> root['blob'].open('w').write('test')
+
+    >>> blob_storage.pack(new_time(), referencesf)
+
+    Clean up:
+
+    >>> database.close()
+    """
+
+
+def secure_blob_directory():
+    """
+    This is a test for secure creation and verification of secure settings of
+    blob directories.
+
+    >>> blob_storage = create_storage(blob_dir='blobs')
+
+    Two directories are created:
+
+    >>> os.path.isdir('blobs')
+    True
+    >>> tmp_dir = os.path.join('blobs', 'tmp')
+    >>> os.path.isdir(tmp_dir)
+    True
+
+    They are only accessible by the owner:
+
+    >>> oct(os.stat('blobs').st_mode)
+    '040700'
+    >>> oct(os.stat(tmp_dir).st_mode)
+    '040700'
+
+    These settings are recognized as secure:
+
+    >>> blob_storage.fshelper.isSecure('blobs')
+    True
+    >>> blob_storage.fshelper.isSecure(tmp_dir)
+    True
+
+    After making the permissions of tmp_dir more liberal, the directory is
+    recognized as insecure:
+
+    >>> os.chmod(tmp_dir, 040711)
+    >>> blob_storage.fshelper.isSecure(tmp_dir)
+    False
+
+    Clean up:
+
+    >>> blob_storage.close()
+
+    """
+
+# On windows, we can't create secure blob directories, at least not
+# with APIs in the standard library, so there's no point in testing
+# this.
+if sys.platform == 'win32':
+    del secure_blob_directory
+
+def loadblob_tmpstore():
+    """
+    This is a test for assuring that the TmpStore's loadBlob implementation
+    falls back correctly to loadBlob on the backend.
+
+    First, let's setup a regular database and store a blob:
+
+    >>> blob_storage = create_storage()
+    >>> database = DB(blob_storage)
+    >>> connection = database.open()
+    >>> root = connection.root()
+    >>> from ZODB.blob import Blob
+    >>> root['blob'] = Blob()
+    >>> connection.add(root['blob'])
+    >>> root['blob'].open('w').write('test')
+    >>> import transaction
+    >>> transaction.commit()
+    >>> blob_oid = root['blob']._p_oid
+    >>> tid = connection._storage.lastTransaction()
+
+    Now we open a database with a TmpStore in front:
+
+    >>> database.close()
+
+    >>> from ZODB.Connection import TmpStore
+    >>> tmpstore = TmpStore(blob_storage)
+
+    We can access the blob correctly:
+
+    >>> tmpstore.loadBlob(blob_oid, tid) == blob_storage.loadBlob(blob_oid, tid)
+    True
+
+    Clean up:
+
+    >>> tmpstore.close()
+    >>> database.close()
+    """
+
+def is_blob_record():
+    r"""
+    >>> bs = create_storage()
+    >>> db = DB(bs)
+    >>> conn = db.open()
+    >>> conn.root()['blob'] = ZODB.blob.Blob()
+    >>> transaction.commit()
+    >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(0), '')[0])
+    False
+    >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(1), '')[0])
+    True
+
+    An invalid pickle yields a false value:
+
+    >>> ZODB.blob.is_blob_record("Hello world!")
+    False
+    >>> ZODB.blob.is_blob_record('c__main__\nC\nq\x01.')
+    False
+    >>> ZODB.blob.is_blob_record('cWaaaa\nC\nq\x01.')
+    False
+
+    As does None, which may occur in delete records:
+
+    >>> ZODB.blob.is_blob_record(None)
+    False
+
+    >>> db.close()
+    """
+
+def do_not_depend_on_cwd():
+    """
+    >>> bs = create_storage()
+    >>> here = os.getcwd()
+    >>> os.mkdir('evil')
+    >>> os.chdir('evil')
+    >>> db = DB(bs)
+    >>> conn = db.open()
+    >>> conn.root()['blob'] = ZODB.blob.Blob()
+    >>> conn.root()['blob'].open('w').write('data')
+    >>> transaction.commit()
+    >>> os.chdir(here)
+    >>> conn.root()['blob'].open().read()
+    'data'
+
+    >>> bs.close()
+    """
+
+def savepoint_isolation():
+    """Make sure savepoint data is distinct accross transactions
+
+    >>> bs = create_storage()
+    >>> db = DB(bs)
+    >>> conn = db.open()
+    >>> conn.root.b = ZODB.blob.Blob('initial')
+    >>> transaction.commit()
+    >>> conn.root.b.open('w').write('1')
+    >>> _ = transaction.savepoint()
+    >>> tm = transaction.TransactionManager()
+    >>> conn2 = db.open(transaction_manager=tm)
+    >>> conn2.root.b.open('w').write('2')
+    >>> _ = tm.savepoint()
+    >>> conn.root.b.open().read()
+    '1'
+    >>> conn2.root.b.open().read()
+    '2'
+    >>> transaction.abort()
+    >>> tm.commit()
+    >>> conn.sync()
+    >>> conn.root.b.open().read()
+    '2'
+    >>> db.close()
+    """
+
+def savepoint_cleanup():
+    """Make sure savepoint data gets cleaned up.
+
+    >>> bs = create_storage()
+    >>> tdir = bs.temporaryDirectory()
+    >>> os.listdir(tdir)
+    []
+
+    >>> db = DB(bs)
+    >>> conn = db.open()
+    >>> conn.root.b = ZODB.blob.Blob('initial')
+    >>> _ = transaction.savepoint()
+    >>> len(os.listdir(tdir))
+    1
+    >>> transaction.abort()
+    >>> os.listdir(tdir)
+    []
+    >>> conn.root.b = ZODB.blob.Blob('initial')
+    >>> transaction.commit()
+    >>> conn.root.b.open('w').write('1')
+    >>> _ = transaction.savepoint()
+    >>> transaction.abort()
+    >>> os.listdir(tdir)
+    []
+
+    >>> db.close()
+    """
+
+
+def setUp(test):
+    ZODB.tests.util.setUp(test)
+    test.globs['rmtree'] = zope.testing.setupstack.rmtree
+
+
+def storage_reusable_suite(prefix, factory,
+                           test_blob_storage_recovery=False,
+                           test_packing=False,
+                           test_undo=True,
+                           keep_history=True,
+                           pack_test_name='blob_packing.txt',
+                           ):
+    """Return a test suite for a generic IBlobStorage.
+
+    Pass a factory taking a name and a blob directory name.
+    """
+
+    def setup(test):
+        setUp(test)
+        def create_storage(name='data', blob_dir=None):
+            if blob_dir is None:
+                blob_dir = '%s.bobs' % name
+            return factory(name, blob_dir)
+
+        test.globs['create_storage'] = create_storage
+    
+    suite = unittest.TestSuite()
+    suite.addTest(doctest.DocFileSuite(
+        "blob_connection.txt", "blob_importexport.txt",
+        "blob_transaction.txt",
+        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+        optionflags=doctest.ELLIPSIS,
+        ))
+    if test_packing:
+        suite.addTest(doctest.DocFileSuite(
+            pack_test_name,
+            setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+            ))
+    suite.addTest(doctest.DocTestSuite(
+        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+        checker = zope.testing.renormalizing.RENormalizing([
+            (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
+            (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
+            ]),
+        ))
+
+    def create_storage(self, name='data', blob_dir=None):
+        if blob_dir is None:
+            blob_dir = '%s.bobs' % name
+        return factory(name, blob_dir)
+
+    def add_test_based_on_test_class(class_):
+        new_class = class_.__class__(
+            prefix+class_.__name__, (class_, ),
+            dict(create_storage=create_storage),
+            )
+        suite.addTest(unittest.makeSuite(new_class))
+
+    if test_blob_storage_recovery:
+        add_test_based_on_test_class(RecoveryBlobStorage)
+    if test_undo:
+        add_test_based_on_test_class(BlobUndoTests)
+
+    suite.layer = ZODB.tests.util.MininalTestLayer(prefix+'BlobTests')
+
+    return suite

Added: relstorage/trunk/relstorage/tests/hftestbase.py
===================================================================
--- relstorage/trunk/relstorage/tests/hftestbase.py	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/hftestbase.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,299 @@
+##############################################################################
+#
+# Copyright (c) 2008 Zope Foundation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""A foundation for history-free RelStorage tests"""
+
+from relstorage.tests.RecoveryStorage import BasicRecoveryStorage
+from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
+from relstorage.tests.reltestbase import GenericRelStorageTests
+from relstorage.tests.reltestbase import RelStorageTestBase
+from ZODB.FileStorage import FileStorage
+from ZODB.serialize import referencesf
+from ZODB.tests.ConflictResolution import PCounter
+from ZODB.tests.PackableStorage import dumps
+from ZODB.tests.PackableStorage import pdumps
+from ZODB.tests.PackableStorage import Root
+from ZODB.tests.PackableStorage import ZERO
+from ZODB.tests.StorageTestBase import zodb_pickle
+from ZODB.tests.StorageTestBase import zodb_unpickle
+import cPickle
+import time
+
+
+class HistoryFreeRelStorageTests(
+    GenericRelStorageTests,
+    ):
+
+    keep_history = False
+
+    # This overrides certain tests so they work with a storage that
+    # collects garbage but does not retain old versions.
+
+    def checkPackAllRevisions(self):
+        self._initroot()
+        eq = self.assertEqual
+        raises = self.assertRaises
+        # Create a `persistent' object
+        obj = self._newobj()
+        oid = obj.getoid()
+        obj.value = 1
+        # Commit three different revisions
+        revid1 = self._dostoreNP(oid, data=pdumps(obj))
+        obj.value = 2
+        revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
+        obj.value = 3
+        revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
+        # Now make sure only the latest revision can be extracted
+        raises(KeyError, self._storage.loadSerial, oid, revid1)
+        raises(KeyError, self._storage.loadSerial, oid, revid2)
+        data = self._storage.loadSerial(oid, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 3)
+        # Now pack all transactions; need to sleep a second to make
+        # sure that the pack time is greater than the last commit time.
+        now = packtime = time.time()
+        while packtime <= now:
+            packtime = time.time()
+        self._storage.pack(packtime, referencesf)
+        self._storage.sync()
+        # All revisions of the object should be gone, since there is no
+        # reference from the root object to this object.
+        raises(KeyError, self._storage.loadSerial, oid, revid1)
+        raises(KeyError, self._storage.loadSerial, oid, revid2)
+        raises(KeyError, self._storage.loadSerial, oid, revid3)
+        raises(KeyError, self._storage.load, oid, '')
+
+    def checkPackJustOldRevisions(self):
+        eq = self.assertEqual
+        raises = self.assertRaises
+        loads = self._makeloader()
+        # Create a root object.  This can't be an instance of Object,
+        # otherwise the pickling machinery will serialize it as a persistent
+        # id and not as an object that contains references (persistent ids) to
+        # other objects.
+        root = Root()
+        # Create a persistent object, with some initial state
+        obj = self._newobj()
+        oid = obj.getoid()
+        # Link the root object to the persistent object, in order to keep the
+        # persistent object alive.  Store the root object.
+        root.obj = obj
+        root.value = 0
+        revid0 = self._dostoreNP(ZERO, data=dumps(root))
+        # Make sure the root can be retrieved
+        data, revid = self._storage.load(ZERO, '')
+        eq(revid, revid0)
+        eq(loads(data).value, 0)
+        # Commit three different revisions of the other object
+        obj.value = 1
+        revid1 = self._dostoreNP(oid, data=pdumps(obj))
+        obj.value = 2
+        revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
+        obj.value = 3
+        revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
+        # Now make sure only the latest revision can be extracted
+        raises(KeyError, self._storage.loadSerial, oid, revid1)
+        raises(KeyError, self._storage.loadSerial, oid, revid2)
+        data = self._storage.loadSerial(oid, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 3)
+        # Now pack.  The object should stay alive because it's pointed
+        # to by the root.
+        now = packtime = time.time()
+        while packtime <= now:
+            packtime = time.time()
+        self._storage.pack(packtime, referencesf)
+        # Make sure the revisions are gone, but that object zero and revision
+        # 3 are still there and correct
+        data, revid = self._storage.load(ZERO, '')
+        eq(revid, revid0)
+        eq(loads(data).value, 0)
+        raises(KeyError, self._storage.loadSerial, oid, revid1)
+        raises(KeyError, self._storage.loadSerial, oid, revid2)
+        data = self._storage.loadSerial(oid, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 3)
+        data, revid = self._storage.load(oid, '')
+        eq(revid, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid)
+        eq(pobj.value, 3)
+
+    def checkPackOnlyOneObject(self):
+        eq = self.assertEqual
+        raises = self.assertRaises
+        loads = self._makeloader()
+        # Create a root object.  This can't be an instance of Object,
+        # otherwise the pickling machinery will serialize it as a persistent
+        # id and not as an object that contains references (persistent ids) to
+        # other objects.
+        root = Root()
+        # Create a persistent object, with some initial state
+        obj1 = self._newobj()
+        oid1 = obj1.getoid()
+        # Create another persistent object, with some initial state.
+        obj2 = self._newobj()
+        oid2 = obj2.getoid()
+        # Link the root object to the persistent objects, in order to keep
+        # them alive.  Store the root object.
+        root.obj1 = obj1
+        root.obj2 = obj2
+        root.value = 0
+        revid0 = self._dostoreNP(ZERO, data=dumps(root))
+        # Make sure the root can be retrieved
+        data, revid = self._storage.load(ZERO, '')
+        eq(revid, revid0)
+        eq(loads(data).value, 0)
+        # Commit three different revisions of the first object
+        obj1.value = 1
+        revid1 = self._dostoreNP(oid1, data=pdumps(obj1))
+        obj1.value = 2
+        revid2 = self._dostoreNP(oid1, revid=revid1, data=pdumps(obj1))
+        obj1.value = 3
+        revid3 = self._dostoreNP(oid1, revid=revid2, data=pdumps(obj1))
+        # Now make sure only the latest revision can be extracted
+        raises(KeyError, self._storage.loadSerial, oid1, revid1)
+        raises(KeyError, self._storage.loadSerial, oid1, revid2)
+        data = self._storage.loadSerial(oid1, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid1)
+        eq(pobj.value, 3)
+        # Now commit a revision of the second object
+        obj2.value = 11
+        revid4 = self._dostoreNP(oid2, data=pdumps(obj2))
+        # And make sure the revision can be extracted
+        data = self._storage.loadSerial(oid2, revid4)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid2)
+        eq(pobj.value, 11)
+        # Now pack just revisions 1 and 2 of object1.  Object1's current
+        # revision should stay alive because it's pointed to by the root, as
+        # should Object2's current revision.
+        now = packtime = time.time()
+        while packtime <= now:
+            packtime = time.time()
+        self._storage.pack(packtime, referencesf)
+        # Make sure the revisions are gone, but that object zero, object2, and
+        # revision 3 of object1 are still there and correct.
+        data, revid = self._storage.load(ZERO, '')
+        eq(revid, revid0)
+        eq(loads(data).value, 0)
+        raises(KeyError, self._storage.loadSerial, oid1, revid1)
+        raises(KeyError, self._storage.loadSerial, oid1, revid2)
+        data = self._storage.loadSerial(oid1, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid1)
+        eq(pobj.value, 3)
+        data, revid = self._storage.load(oid1, '')
+        eq(revid, revid3)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid1)
+        eq(pobj.value, 3)
+        data, revid = self._storage.load(oid2, '')
+        eq(revid, revid4)
+        eq(loads(data).value, 11)
+        data = self._storage.loadSerial(oid2, revid4)
+        pobj = cPickle.loads(data)
+        eq(pobj.getoid(), oid2)
+        eq(pobj.value, 11)
+
+    def checkResolve(self):
+        obj = PCounter()
+        obj.inc()
+
+        oid = self._storage.new_oid()
+
+        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
+
+        obj.inc()
+        obj.inc()
+
+        # The effect of committing two transactions with the same
+        # pickle is to commit two different transactions relative to
+        # revid1 that add two to _value.
+
+        # open s1
+        s1 = self._storage.new_instance()
+        # start a load transaction in s1
+        s1.poll_invalidations()
+
+        # commit a change
+        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
+
+        # commit a conflicting change using s1
+        main_storage = self._storage
+        self._storage = s1
+        try:
+            # we can resolve this conflict because s1 has an open
+            # transaction that can read the old state of the object.
+            revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
+            s1.release()
+        finally:
+            self._storage = main_storage
+
+        data, serialno = self._storage.load(oid, '')
+        inst = zodb_unpickle(data)
+        self.assertEqual(inst._value, 5)
+
+
+class HistoryFreeToFileStorage(
+        RelStorageTestBase,
+        BasicRecoveryStorage,
+        ):
+
+    keep_history = False
+
+    def setUp(self):
+        self.open(create=1)
+        self._storage.zap_all()
+        self._dst = FileStorage("Dest.fs", create=True)
+
+    def tearDown(self):
+        self._storage.close()
+        self._dst.close()
+        self._storage.cleanup()
+        self._dst.cleanup()
+
+    def new_dest(self):
+        return FileStorage('Dest.fs')
+
+
+class HistoryFreeFromFileStorage(
+        RelStorageTestBase,
+        UndoableRecoveryStorage,
+        ):
+
+    keep_history = False
+
+    def setUp(self):
+        self.open(create=1)
+        self._storage.zap_all()
+        self._dst = self._storage
+        self._storage = FileStorage("Source.fs", create=True)
+
+    def tearDown(self):
+        self._storage.close()
+        self._dst.close()
+        self._storage.cleanup()
+        self._dst.cleanup()
+
+    def new_dest(self):
+        return self._dst
+
+    def compare(self, src, dest):
+        # The dest storage has a truncated copy of dest, so
+        # use compare_truncated() instead of compare_exact().
+        self.compare_truncated(src, dest)

Added: relstorage/trunk/relstorage/tests/hptestbase.py
===================================================================
--- relstorage/trunk/relstorage/tests/hptestbase.py	                        (rev 0)
+++ relstorage/trunk/relstorage/tests/hptestbase.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -0,0 +1,275 @@
+##############################################################################
+#
+# Copyright (c) 2008 Zope Foundation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""A foundation for history-preserving RelStorage tests"""
+
+from persistent.mapping import PersistentMapping
+from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
+from relstorage.tests.reltestbase import GenericRelStorageTests
+from relstorage.tests.reltestbase import RelStorageTestBase
+from ZODB.DB import DB
+from ZODB.FileStorage import FileStorage
+from ZODB.serialize import referencesf
+from ZODB.tests import HistoryStorage
+from ZODB.tests import IteratorStorage
+from ZODB.tests import PackableStorage
+from ZODB.tests import RevisionStorage
+from ZODB.tests import TransactionalUndoStorage
+from ZODB.tests.MinPO import MinPO
+from ZODB.tests.StorageTestBase import zodb_pickle
+from ZODB.utils import p64
+import time
+import transaction
+
+
+class HistoryPreservingRelStorageTests(
+    GenericRelStorageTests,
+    TransactionalUndoStorage.TransactionalUndoStorage,
+    IteratorStorage.IteratorStorage,
+    IteratorStorage.ExtendedIteratorStorage,
+    RevisionStorage.RevisionStorage,
+    PackableStorage.PackableUndoStorage,
+    HistoryStorage.HistoryStorage,
+    ):
+
+    keep_history = True
+
+    def checkTransactionalUndoIterator(self):
+        # this test overrides the broken version in TransactionalUndoStorage.
+
+        s = self._storage
+
+        BATCHES = 4
+        OBJECTS = 4
+
+        orig = []
+        for i in range(BATCHES):
+            t = transaction.Transaction()
+            tid = p64(i + 1)
+            s.tpc_begin(t, tid)
+            txn_orig = []
+            for j in range(OBJECTS):
+                oid = s.new_oid()
+                obj = MinPO(i * OBJECTS + j)
+                revid = s.store(oid, None, zodb_pickle(obj), '', t)
+                txn_orig.append((tid, oid, revid))
+            serials = s.tpc_vote(t)
+            if not serials:
+                orig.extend(txn_orig)
+            else:
+                # The storage provided revision IDs after the vote
+                serials = dict(serials)
+                for tid, oid, revid in txn_orig:
+                    self.assertEqual(revid, None)
+                    orig.append((tid, oid, serials[oid]))
+            s.tpc_finish(t)
+
+        i = 0
+        for tid, oid, revid in orig:
+            self._dostore(oid, revid=revid, data=MinPO(revid),
+                          description="update %s" % i)
+
+        # Undo the OBJECTS transactions that modified objects created
+        # in the ith original transaction.
+
+        def undo(i):
+            info = s.undoInfo()
+            t = transaction.Transaction()
+            s.tpc_begin(t)
+            base = i * OBJECTS + i
+            for j in range(OBJECTS):
+                tid = info[base + j]['id']
+                s.undo(tid, t)
+            s.tpc_vote(t)
+            s.tpc_finish(t)
+
+        for i in range(BATCHES):
+            undo(i)
+
+        # There are now (2 + OBJECTS) * BATCHES transactions:
+        #     BATCHES original transactions, followed by
+        #     OBJECTS * BATCHES modifications, followed by
+        #     BATCHES undos
+
+        iter = s.iterator()
+        offset = 0
+
+        eq = self.assertEqual
+
+        for i in range(BATCHES):
+            txn = iter[offset]
+            offset += 1
+
+            tid = p64(i + 1)
+            eq(txn.tid, tid)
+
+            L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
+            L2 = [(oid, revid, None) for _tid, oid, revid in orig
+                  if _tid == tid]
+
+            eq(L1, L2)
+
+        for i in range(BATCHES * OBJECTS):
+            txn = iter[offset]
+            offset += 1
+            eq(len([rec for rec in txn if rec.data_txn is None]), 1)
+
+        for i in range(BATCHES):
+            txn = iter[offset]
+            offset += 1
+
+            # The undos are performed in reverse order.
+            otid = p64(BATCHES - i)
+            L1 = [rec.oid for rec in txn]
+            L2 = [oid for _tid, oid, revid in orig if _tid == otid]
+            L1.sort()
+            L2.sort()
+            eq(L1, L2)
+
+        self.assertRaises(IndexError, iter.__getitem__, offset)
+
+    def checkNonASCIITransactionMetadata(self):
+        # Verify the database stores and retrieves non-ASCII text
+        # in transaction metadata.
+        ugly_string = ''.join(chr(c) for c in range(256))
+
+        db = DB(self._storage)
+        try:
+            c1 = db.open()
+            r1 = c1.root()
+            r1['alpha'] = 1
+            transaction.get().setUser(ugly_string)
+            transaction.commit()
+            r1['alpha'] = 2
+            transaction.get().note(ugly_string)
+            transaction.commit()
+
+            info = self._storage.undoInfo()
+            self.assertEqual(info[0]['description'], ugly_string)
+            self.assertEqual(info[1]['user_name'], '/ ' + ugly_string)
+        finally:
+            db.close()
+
+    def checkPackGC(self, expect_object_deleted=True):
+        db = DB(self._storage)
+        try:
+            c1 = db.open()
+            r1 = c1.root()
+            r1['alpha'] = PersistentMapping()
+            transaction.commit()
+
+            oid = r1['alpha']._p_oid
+            r1['alpha'] = None
+            transaction.commit()
+
+            # The object should still exist
+            self._storage.load(oid, '')
+
+            # Pack
+            now = packtime = time.time()
+            while packtime <= now:
+                packtime = time.time()
+            self._storage.pack(packtime, referencesf)
+            self._storage.sync()
+
+            if expect_object_deleted:
+                # The object should now be gone
+                self.assertRaises(KeyError, self._storage.load, oid, '')
+            else:
+                # The object should still exist
+                self._storage.load(oid, '')
+        finally:
+            db.close()
+
+    def checkPackGCDisabled(self):
+        self._storage._options.pack_gc = False
+        self.checkPackGC(expect_object_deleted=False)
+
+    def checkPackGCDryRun(self):
+        self._storage._options.pack_dry_run = True
+        self.checkPackGC(expect_object_deleted=False)
+
+    def checkPackOldUnreferenced(self):
+        db = DB(self._storage)
+        try:
+            c1 = db.open()
+            r1 = c1.root()
+            r1['A'] = PersistentMapping()
+            B = PersistentMapping()
+            r1['A']['B'] = B
+            transaction.get().note('add A then add B to A')
+            transaction.commit()
+
+            del r1['A']['B']
+            transaction.get().note('remove B from A')
+            transaction.commit()
+
+            r1['A']['C'] = ''
+            transaction.get().note('add C to A')
+            transaction.commit()
+
+            now = packtime = time.time()
+            while packtime <= now:
+                packtime = time.time()
+            self._storage.pack(packtime, referencesf)
+
+            # B should be gone, since nothing refers to it.
+            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')
+
+        finally:
+            db.close()
+
+
+class HistoryPreservingToFileStorage(
+        RelStorageTestBase,
+        UndoableRecoveryStorage,
+        ):
+
+    keep_history = True
+
+    def setUp(self):
+        self.open(create=1)
+        self._storage.zap_all()
+        self._dst = FileStorage("Dest.fs", create=True)
+
+    def tearDown(self):
+        self._storage.close()
+        self._dst.close()
+        self._storage.cleanup()
+        self._dst.cleanup()
+
+    def new_dest(self):
+        return FileStorage('Dest.fs')
+
+
+class HistoryPreservingFromFileStorage(
+        RelStorageTestBase,
+        UndoableRecoveryStorage,
+        ):
+
+    keep_history = True
+
+    def setUp(self):
+        self.open(create=1)
+        self._storage.zap_all()
+        self._dst = self._storage
+        self._storage = FileStorage("Source.fs", create=True)
+
+    def tearDown(self):
+        self._storage.close()
+        self._dst.close()
+        self._storage.cleanup()
+        self._dst.cleanup()
+
+    def new_dest(self):
+        return self._dst

Modified: relstorage/trunk/relstorage/tests/reltestbase.py
===================================================================
--- relstorage/trunk/relstorage/tests/reltestbase.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/tests/reltestbase.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -11,40 +11,39 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-"""A foundation for relstorage adapter tests"""
+"""A foundation for RelStorage tests"""
 
-import itertools
-import time
+from persistent import Persistent
+from persistent.mapping import PersistentMapping
 from relstorage.relstorage import RelStorage
 from relstorage.tests import fakecache
-
 from ZODB.DB import DB
+from ZODB.serialize import referencesf
+from ZODB.tests import BasicStorage
+from ZODB.tests import ConflictResolution
+from ZODB.tests import MTStorage
+from ZODB.tests import PackableStorage
+from ZODB.tests import PersistentStorage
+from ZODB.tests import ReadOnlyStorage
+from ZODB.tests import StorageTestBase
+from ZODB.tests import Synchronization
+from ZODB.tests.MinPO import MinPO
+from ZODB.tests.StorageTestBase import zodb_pickle
+from ZODB.tests.StorageTestBase import zodb_unpickle
 from ZODB.utils import p64
-from ZODB.FileStorage import FileStorage
-from persistent import Persistent
-from persistent.mapping import PersistentMapping
+import time
 import transaction
 
-from ZODB.tests import StorageTestBase, BasicStorage, \
-     TransactionalUndoStorage, PackableStorage, \
-     Synchronization, ConflictResolution, HistoryStorage, \
-     IteratorStorage, RevisionStorage, PersistentStorage, \
-     MTStorage, ReadOnlyStorage, RecoveryStorage
 
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
-from ZODB.serialize import referencesf
+class RelStorageTestBase(StorageTestBase.StorageTestBase):
 
-
-class BaseRelStorageTests(StorageTestBase.StorageTestBase):
-
     def make_adapter(self):
         # abstract method
         raise NotImplementedError
 
     def open(self, **kwargs):
         adapter = self.make_adapter()
-        self._storage = RelStorage(adapter, pack_gc=True, **kwargs)
+        self._storage = RelStorage(adapter, **kwargs)
 
     def setUp(self):
         self.open(create=1)
@@ -56,21 +55,15 @@
         self._storage.cleanup()
 
 
-class RelStorageTests(
-    BaseRelStorageTests,
+class GenericRelStorageTests(
+    RelStorageTestBase,
     BasicStorage.BasicStorage,
-    TransactionalUndoStorage.TransactionalUndoStorage,
-    RevisionStorage.RevisionStorage,
     PackableStorage.PackableStorage,
-    PackableStorage.PackableUndoStorage,
     Synchronization.SynchronizedStorage,
     ConflictResolution.ConflictResolvingStorage,
-    HistoryStorage.HistoryStorage,
-    IteratorStorage.IteratorStorage,
-    IteratorStorage.ExtendedIteratorStorage,
     PersistentStorage.PersistentStorage,
     MTStorage.MTStorage,
-    ReadOnlyStorage.ReadOnlyStorage
+    ReadOnlyStorage.ReadOnlyStorage,
     ):
 
     def checkDropAndPrepare(self):
@@ -250,6 +243,7 @@
             self.assertTrue('commit_count' in fakecache.data)
             self.assertEqual(len(fakecache.data), 3)
             oid = r1['alpha']._p_oid
+            self.assertEqual(len(fakecache.data), 3)
 
             got, serial = c1._storage.load(oid, '')
             # another tid and state should now be cached
@@ -372,192 +366,24 @@
         fakecache.data.clear()
         self.checkPollInterval(using_cache=True)
 
-
-    def checkTransactionalUndoIterator(self):
-        # this test overrides the broken version in TransactionalUndoStorage.
-
-        s = self._storage
-
-        BATCHES = 4
-        OBJECTS = 4
-
-        orig = []
-        for i in range(BATCHES):
-            t = transaction.Transaction()
-            tid = p64(i + 1)
-            s.tpc_begin(t, tid)
-            txn_orig = []
-            for j in range(OBJECTS):
-                oid = s.new_oid()
-                obj = MinPO(i * OBJECTS + j)
-                revid = s.store(oid, None, zodb_pickle(obj), '', t)
-                txn_orig.append((tid, oid, revid))
-            serials = s.tpc_vote(t)
-            if not serials:
-                orig.extend(txn_orig)
-            else:
-                # The storage provided revision IDs after the vote
-                serials = dict(serials)
-                for tid, oid, revid in txn_orig:
-                    self.assertEqual(revid, None)
-                    orig.append((tid, oid, serials[oid]))
-            s.tpc_finish(t)
-
-        i = 0
-        for tid, oid, revid in orig:
-            self._dostore(oid, revid=revid, data=MinPO(revid),
-                          description="update %s" % i)
-
-        # Undo the OBJECTS transactions that modified objects created
-        # in the ith original transaction.
-
-        def undo(i):
-            info = s.undoInfo()
-            t = transaction.Transaction()
-            s.tpc_begin(t)
-            base = i * OBJECTS + i
-            for j in range(OBJECTS):
-                tid = info[base + j]['id']
-                s.undo(tid, t)
-            s.tpc_vote(t)
-            s.tpc_finish(t)
-
-        for i in range(BATCHES):
-            undo(i)
-
-        # There are now (2 + OBJECTS) * BATCHES transactions:
-        #     BATCHES original transactions, followed by
-        #     OBJECTS * BATCHES modifications, followed by
-        #     BATCHES undos
-
-        iter = s.iterator()
-        offset = 0
-
-        eq = self.assertEqual
-
-        for i in range(BATCHES):
-            txn = iter[offset]
-            offset += 1
-
-            tid = p64(i + 1)
-            eq(txn.tid, tid)
-
-            L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
-            L2 = [(oid, revid, None) for _tid, oid, revid in orig
-                  if _tid == tid]
-
-            eq(L1, L2)
-
-        for i in range(BATCHES * OBJECTS):
-            txn = iter[offset]
-            offset += 1
-            eq(len([rec for rec in txn if rec.data_txn is None]), 1)
-
-        for i in range(BATCHES):
-            txn = iter[offset]
-            offset += 1
-
-            # The undos are performed in reverse order.
-            otid = p64(BATCHES - i)
-            L1 = [rec.oid for rec in txn]
-            L2 = [oid for _tid, oid, revid in orig if _tid == otid]
-            L1.sort()
-            L2.sort()
-            eq(L1, L2)
-
-        self.assertRaises(IndexError, iter.__getitem__, offset)
-
-    def checkNonASCIITransactionMetadata(self):
-        # Verify the database stores and retrieves non-ASCII text
-        # in transaction metadata.
-        ugly_string = ''.join(chr(c) for c in range(256))
-
+    def checkDoubleCommitter(self):
+        # Verify we can store an object that gets committed twice in
+        # a single transaction.
         db = DB(self._storage)
         try:
-            c1 = db.open()
-            r1 = c1.root()
-            r1['alpha'] = 1
-            transaction.get().setUser(ugly_string)
-            transaction.commit()
-            r1['alpha'] = 2
-            transaction.get().note(ugly_string)
-            transaction.commit()
-
-            info = self._storage.undoInfo()
-            self.assertEqual(info[0]['description'], ugly_string)
-            self.assertEqual(info[1]['user_name'], '/ ' + ugly_string)
+            conn = db.open()
+            try:
+                conn.root()['dc'] = DoubleCommitter()
+                transaction.commit()
+                conn2 = db.open()
+                self.assertEquals(conn2.root()['dc'].new_attribute, 1)
+                conn2.close()
+            finally:
+                transaction.abort()
+                conn.close()
         finally:
             db.close()
 
-    def checkPackGC(self, expect_object_deleted=True):
-        db = DB(self._storage)
-        try:
-            c1 = db.open()
-            r1 = c1.root()
-            r1['alpha'] = PersistentMapping()
-            transaction.commit()
-
-            oid = r1['alpha']._p_oid
-            r1['alpha'] = None
-            transaction.commit()
-
-            # The object should still exist
-            self._storage.load(oid, '')
-
-            # Pack
-            now = packtime = time.time()
-            while packtime <= now:
-                packtime = time.time()
-            self._storage.pack(packtime, referencesf)
-            self._storage.sync()
-
-            if expect_object_deleted:
-                # The object should now be gone
-                self.assertRaises(KeyError, self._storage.load, oid, '')
-            else:
-                # The object should still exist
-                self._storage.load(oid, '')
-        finally:
-            db.close()
-
-    def checkPackGCDisabled(self):
-        self._storage._options.pack_gc = False
-        self.checkPackGC(expect_object_deleted=False)
-
-    def checkPackGCDryRun(self):
-        self._storage._options.pack_dry_run = True
-        self.checkPackGC(expect_object_deleted=False)
-
-    def checkPackOldUnreferenced(self):
-        db = DB(self._storage)
-        try:
-            c1 = db.open()
-            r1 = c1.root()
-            r1['A'] = PersistentMapping()
-            B = PersistentMapping()
-            r1['A']['B'] = B
-            transaction.get().note('add A then add B to A')
-            transaction.commit()
-
-            del r1['A']['B']
-            transaction.get().note('remove B from A')
-            transaction.commit()
-
-            r1['A']['C'] = ''
-            transaction.get().note('add C to A')
-            transaction.commit()
-
-            now = packtime = time.time()
-            while packtime <= now:
-                packtime = time.time()
-            self._storage.pack(packtime, referencesf)
-
-            # B should be gone, since nothing refers to it.
-            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')
-
-        finally:
-            db.close()
-
     def checkPackDutyCycle(self):
         # Exercise the code in the pack algorithm that releases the
         # commit lock for a time to allow concurrent transactions to commit.
@@ -569,34 +395,24 @@
 
         db = DB(self._storage)
         try:
+            # add some data to be packed
+            c = db.open()
+            r = c.root()
+            r['alpha'] = PersistentMapping()
+            transaction.commit()
+            del r['alpha']
+            transaction.commit()
+
             # Pack
             now = packtime = time.time()
             while packtime <= now:
                 packtime = time.time()
             self._storage.pack(packtime, referencesf, sleep=sim_sleep)
 
-            self.assertEquals(len(slept), 1)
+            self.assertTrue(len(slept) > 0)
         finally:
             db.close()
 
-    def checkDoubleCommitter(self):
-        # Verify we can store an object that gets committed twice in
-        # a single transaction.
-        db = DB(self._storage)
-        try:
-            conn = db.open()
-            try:
-                conn.root()['dc'] = DoubleCommitter()
-                transaction.commit()
-                conn2 = db.open()
-                self.assertEquals(conn2.root()['dc'].new_attribute, 1)
-                conn2.close()
-            finally:
-                transaction.abort()
-                conn.close()
-        finally:
-            db.close()
-
     def checkPackBrokenPickle(self):
         # Verify the pack stops with the right exception if it encounters
         # a broken pickle.
@@ -612,89 +428,3 @@
         if not hasattr(self, 'new_attribute'):
             self.new_attribute = 1
         return Persistent.__getstate__(self)
-
-
-class IteratorDeepCompareUnordered:
-    # Like IteratorDeepCompare, but compensates for OID order
-    # differences in transactions.
-    def compare(self, storage1, storage2):
-        eq = self.assertEqual
-        iter1 = storage1.iterator()
-        iter2 = storage2.iterator()
-        for txn1, txn2 in itertools.izip(iter1, iter2):
-            eq(txn1.tid,         txn2.tid)
-            eq(txn1.status,      txn2.status)
-            eq(txn1.user,        txn2.user)
-            eq(txn1.description, txn2.description)
-
-            missing = object()
-            e1 = getattr(txn1, 'extension', missing)
-            if e1 is missing:
-                # old attribute name
-                e1 = txn1._extension
-            e2 = getattr(txn2, 'extension', missing)
-            if e2 is missing:
-                # old attribute name
-                e2 = txn2._extension
-            eq(e1, e2)
-
-            recs1 = [(r.oid, r) for r in txn1]
-            recs1.sort()
-            recs2 = [(r.oid, r) for r in txn2]
-            recs2.sort()
-            eq(len(recs1), len(recs2))
-            for (oid1, rec1), (oid2, rec2) in zip(recs1, recs2):
-                eq(rec1.oid,     rec2.oid)
-                eq(rec1.tid,  rec2.tid)
-                eq(rec1.version, rec2.version)
-                eq(rec1.data,    rec2.data)
-        # Make sure ther are no more records left in txn1 and txn2, meaning
-        # they were the same length
-        self.assertRaises(IndexError, iter1.next)
-        self.assertRaises(IndexError, iter2.next)
-        iter1.close()
-        iter2.close()
-
-
-
-class RecoveryStorageSubset(IteratorDeepCompareUnordered):
-    # The subset of RecoveryStorage tests that do not rely on version
-    # support.
-    pass
-
-for name, attr in RecoveryStorage.RecoveryStorage.__dict__.items():
-    if 'check' in name and 'Version' not in name:
-        setattr(RecoveryStorageSubset, name, attr)
-
-
-class ToFileStorage(BaseRelStorageTests, RecoveryStorageSubset):
-    def setUp(self):
-        self.open(create=1)
-        self._storage.zap_all()
-        self._dst = FileStorage("Dest.fs", create=True)
-
-    def tearDown(self):
-        self._storage.close()
-        self._dst.close()
-        self._storage.cleanup()
-        self._dst.cleanup()
-
-    def new_dest(self):
-        return FileStorage('Dest.fs')
-
-
-class FromFileStorage(BaseRelStorageTests, RecoveryStorageSubset):
-    def setUp(self):
-        self.open(create=1)
-        self._storage.zap_all()
-        self._dst = self._storage
-        self._storage = FileStorage("Source.fs", create=True)
-
-    def tearDown(self):
-        self._storage.close()
-        self._dst.close()
-        self._storage.cleanup()
-        self._dst.cleanup()
-
-    def new_dest(self):
-        return self._dst

Modified: relstorage/trunk/relstorage/tests/testmysql.py
===================================================================
--- relstorage/trunk/relstorage/tests/testmysql.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/tests/testmysql.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -13,30 +13,47 @@
 ##############################################################################
 """Tests of relstorage.adapters.mysql"""
 
+from relstorage.adapters.mysql import MySQLAdapter
+from relstorage.tests.hftestbase import HistoryFreeFromFileStorage
+from relstorage.tests.hftestbase import HistoryFreeRelStorageTests
+from relstorage.tests.hftestbase import HistoryFreeToFileStorage
+from relstorage.tests.hptestbase import HistoryPreservingFromFileStorage
+from relstorage.tests.hptestbase import HistoryPreservingRelStorageTests
+from relstorage.tests.hptestbase import HistoryPreservingToFileStorage
 import logging
 import unittest
 
-import reltestbase
-from relstorage.adapters.mysql import MySQLAdapter
-
-
 class UseMySQLAdapter:
     def make_adapter(self):
+        if self.keep_history:
+            db = 'relstoragetest'
+        else:
+            db = 'relstoragetest_hf'
         return MySQLAdapter(
-            db='relstoragetest',
+            keep_history=self.keep_history,
+            db=db,
             user='relstoragetest',
             passwd='relstoragetest',
             )
 
-class MySQLTests(UseMySQLAdapter, reltestbase.RelStorageTests):
+class HPMySQLTests(UseMySQLAdapter, HistoryPreservingRelStorageTests):
     pass
 
-class MySQLToFile(UseMySQLAdapter, reltestbase.ToFileStorage):
+class HPMySQLToFile(UseMySQLAdapter, HistoryPreservingToFileStorage):
     pass
 
-class FileToMySQL(UseMySQLAdapter, reltestbase.FromFileStorage):
+class HPMySQLFromFile(UseMySQLAdapter, HistoryPreservingFromFileStorage):
     pass
 
+class HFMySQLTests(UseMySQLAdapter, HistoryFreeRelStorageTests):
+    pass
+
+class HFMySQLToFile(UseMySQLAdapter, HistoryFreeToFileStorage):
+    pass
+
+class HFMySQLFromFile(UseMySQLAdapter, HistoryFreeFromFileStorage):
+    pass
+
 db_names = {
     'data': 'relstoragetest',
     '1': 'relstoragetest',
@@ -46,33 +63,55 @@
 
 def test_suite():
     suite = unittest.TestSuite()
-    for klass in [MySQLTests, MySQLToFile, FileToMySQL]:
+    for klass in [
+            HPMySQLTests,
+            HPMySQLToFile,
+            HPMySQLFromFile,
+            HFMySQLTests,
+            HFMySQLToFile,
+            HFMySQLFromFile,
+            ]:
         suite.addTest(unittest.makeSuite(klass, "check"))
 
     try:
-        from ZODB.tests.testblob import storage_reusable_suite
+        import ZODB.blob
     except ImportError:
-        # ZODB < 3.9
+        # ZODB < 3.8
         pass
     else:
-        def create_storage(name, blob_dir):
-            from relstorage.relstorage import RelStorage
-            adapter = MySQLAdapter(
-                db=db_names[name],
-                user='relstoragetest',
-                passwd='relstoragetest',
-                )
-            storage = RelStorage(adapter, name=name, create=True,
-                blob_dir=blob_dir)
-            storage.zap_all()
-            return storage
+        from relstorage.tests.blob.testblob import storage_reusable_suite
+        for keep_history in (False, True):
+            def create_storage(name, blob_dir, keep_history=keep_history):
+                from relstorage.relstorage import RelStorage
+                db = db_names[name]
+                if not keep_history:
+                    db += '_hf'
+                adapter = MySQLAdapter(
+                    keep_history=keep_history,
+                    db=db,
+                    user='relstoragetest',
+                    passwd='relstoragetest',
+                    )
+                storage = RelStorage(adapter, name=name, create=True,
+                    blob_dir=blob_dir)
+                storage.zap_all()
+                return storage
 
-        suite.addTest(storage_reusable_suite(
-            'MySQL', create_storage,
-            test_blob_storage_recovery=True,
-            test_packing=True,
-            ))
+            if keep_history:
+                prefix = 'HPMySQL'
+                pack_test_name = 'blob_packing.txt'
+            else:
+                prefix = 'HFMySQL'
+                pack_test_name = 'blob_packing_history_free.txt'
 
+            suite.addTest(storage_reusable_suite(
+                prefix, create_storage,
+                test_blob_storage_recovery=True,
+                test_packing=True,
+                test_undo=keep_history,
+                pack_test_name=pack_test_name,
+                ))
+
     return suite
 
 if __name__=='__main__':

Modified: relstorage/trunk/relstorage/tests/testoracle.py
===================================================================
--- relstorage/trunk/relstorage/tests/testoracle.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/tests/testoracle.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -18,8 +18,10 @@
 import re
 import unittest
 
-import reltestbase
 from relstorage.adapters.oracle import OracleAdapter
+from relstorage.tests.hptestbase import HistoryPreservingFromFileStorage
+from relstorage.tests.hptestbase import HistoryPreservingRelStorageTests
+from relstorage.tests.hptestbase import HistoryPreservingToFileStorage
 
 
 def getOracleParams():
@@ -43,19 +45,23 @@
         user, password, dsn = getOracleParams()
         return OracleAdapter(user, password, dsn)
 
-class OracleTests(UseOracleAdapter, reltestbase.RelStorageTests):
+class HPOracleTests(UseOracleAdapter, HistoryPreservingRelStorageTests):
     pass
 
-class OracleToFile(UseOracleAdapter, reltestbase.ToFileStorage):
+class HPOracleToFile(UseOracleAdapter, HistoryPreservingToFileStorage):
     pass
 
-class FileToOracle(UseOracleAdapter, reltestbase.FromFileStorage):
+class HPOracleFromFile(UseOracleAdapter, HistoryPreservingFromFileStorage):
     pass
 
 
 def test_suite():
     suite = unittest.TestSuite()
-    for klass in [OracleTests, OracleToFile, FileToOracle]:
+    for klass in [
+            HPOracleTests,
+            HPOracleToFile,
+            HPOracleFromFile,
+            ]:
         suite.addTest(unittest.makeSuite(klass, "check"))
     return suite
 

Modified: relstorage/trunk/relstorage/tests/testpostgresql.py
===================================================================
--- relstorage/trunk/relstorage/tests/testpostgresql.py	2009-09-24 22:18:48 UTC (rev 104515)
+++ relstorage/trunk/relstorage/tests/testpostgresql.py	2009-09-25 08:05:01 UTC (rev 104516)
@@ -13,27 +13,46 @@
 ##############################################################################
 """Tests of relstorage.adapters.postgresql"""
 
+from relstorage.adapters.postgresql import PostgreSQLAdapter
+from relstorage.tests.hftestbase import HistoryFreeFromFileStorage
+from relstorage.tests.hftestbase import HistoryFreeRelStorageTests
+from relstorage.tests.hftestbase import HistoryFreeToFileStorage
+from relstorage.tests.hptestbase import HistoryPreservingFromFileStorage
+from relstorage.tests.hptestbase import HistoryPreservingRelStorageTests
+from relstorage.tests.hptestbase import HistoryPreservingToFileStorage
 import logging
 import unittest
 
-import reltestbase
-from relstorage.adapters.postgresql import PostgreSQLAdapter
-
-
 class UsePostgreSQLAdapter:
     def make_adapter(self):
+        if self.keep_history:
+            db = 'relstoragetest'
+        else:
+            db = 'relstoragetest_hf'
         return PostgreSQLAdapter(
-            'dbname=relstoragetest user=relstoragetest password=relstoragetest')
+            keep_history=self.keep_history,
+            dsn='dbname=%s user=relstoragetest password=relstoragetest' % db
+            )
 
-class PostgreSQLTests(UsePostgreSQLAdapter, reltestbase.RelStorageTests):
+class HPPostgreSQLTests(UsePostgreSQLAdapter, HistoryPreservingRelStorageTests):
     pass
 
-class PGToFile(UsePostgreSQLAdapter, reltestbase.ToFileStorage):
+class HPPostgreSQLToFile(UsePostgreSQLAdapter, HistoryPreservingToFileStorage):
     pass
 
-class FileToPG(UsePostgreSQLAdapter, reltestbase.FromFileStorage):
+class HPPostgreSQLFromFile(UsePostgreSQLAdapter,
+        HistoryPreservingFromFileStorage):
     pass
 
+class HFPostgreSQLTests(UsePostgreSQLAdapter, HistoryFreeRelStorageTests):
+    pass
+
+class HFPostgreSQLToFile(UsePostgreSQLAdapter, HistoryFreeToFileStorage):
+    pass
+
+class HFPostgreSQLFromFile(UsePostgreSQLAdapter, HistoryFreeFromFileStorage):
+    pass
+
 db_names = {
     'data': 'relstoragetest',
     '1': 'relstoragetest',
@@ -43,31 +62,53 @@
 
 def test_suite():
     suite = unittest.TestSuite()
-    for klass in [PostgreSQLTests, PGToFile, FileToPG]:
+    for klass in [
+            HPPostgreSQLTests,
+            HPPostgreSQLToFile,
+            HPPostgreSQLFromFile,
+            HFPostgreSQLTests,
+            HFPostgreSQLToFile,
+            HFPostgreSQLFromFile,
+            ]:
         suite.addTest(unittest.makeSuite(klass, "check"))
 
     try:
-        from ZODB.tests.testblob import storage_reusable_suite
+        import ZODB.blob
     except ImportError:
-        # ZODB < 3.9
+        # ZODB < 3.8
         pass
     else:
-        def create_storage(name, blob_dir):
-            from relstorage.relstorage import RelStorage
-            adapter = PostgreSQLAdapter(
-                'dbname=%s user=relstoragetest password=relstoragetest' %
-                db_names[name])
-            storage = RelStorage(adapter, name=name, create=True,
-                blob_dir=blob_dir)
-            storage.zap_all()
-            return storage
+        from relstorage.tests.blob.testblob import storage_reusable_suite
+        for keep_history in (False, True):
+            def create_storage(name, blob_dir, keep_history=keep_history):
+                from relstorage.relstorage import RelStorage
+                db = db_names[name]
+                if not keep_history:
+                    db += '_hf'
+                dsn = ('dbname=%s user=relstoragetest '
+                        'password=relstoragetest' % db)
+                adapter = PostgreSQLAdapter(
+                    keep_history=keep_history, dsn=dsn)
+                storage = RelStorage(adapter, name=name, create=True,
+                    blob_dir=blob_dir)
+                storage.zap_all()
+                return storage
 
-        suite.addTest(storage_reusable_suite(
-            'PostgreSQL', create_storage,
-            test_blob_storage_recovery=True,
-            test_packing=True,
-            ))
+            if keep_history:
+                prefix = 'HPPostgreSQL'
+                pack_test_name = 'blob_packing.txt'
+            else:
+                prefix = 'HFPostgreSQL'
+                pack_test_name = 'blob_packing_history_free.txt'
 
+            suite.addTest(storage_reusable_suite(
+                prefix, create_storage,
+                test_blob_storage_recovery=True,
+                test_packing=True,
+                test_undo=keep_history,
+                pack_test_name=pack_test_name,
+                ))
+
     return suite
 
 if __name__=='__main__':



More information about the checkins mailing list