[Checkins] SVN: relstorage/trunk/relstorage/ Now all but 3 tests pass.

Shane Hathaway shane at hathawaymix.org
Tue Oct 19 20:01:01 EDT 2010


Log message for revision 117781:
  Now all but 3 tests pass.
  

Changed:
  U   relstorage/trunk/relstorage/adapters/mover.py
  U   relstorage/trunk/relstorage/adapters/packundo.py
  U   relstorage/trunk/relstorage/adapters/schema.py
  U   relstorage/trunk/relstorage/blobhelper.py
  U   relstorage/trunk/relstorage/storage.py
  U   relstorage/trunk/relstorage/tests/blob/blob_transaction.txt
  U   relstorage/trunk/relstorage/tests/testoracle.py
  U   relstorage/trunk/relstorage/tests/testpostgresql.py

-=-
Modified: relstorage/trunk/relstorage/adapters/mover.py
===================================================================
--- relstorage/trunk/relstorage/adapters/mover.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/adapters/mover.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -830,21 +830,6 @@
                 """
             cursor.execute(stmt, (tid,))
 
-            if txn_has_blobs:
-                if self.database_name == 'oracle':
-                    stmt = """
-                    INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
-                    SELECT zoid, :1, chunk_num, chunk
-                    FROM temp_blob_chunk
-                    """
-                else:
-                    stmt = """
-                    INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
-                    SELECT zoid, %s, chunk_num, chunk
-                    FROM temp_blob_chunk
-                    """
-                cursor.execute(stmt, (tid,))
-
         else:
             if self.database_name == 'mysql':
                 stmt = """
@@ -882,12 +867,20 @@
                 """
                 cursor.execute(stmt)
 
+        if txn_has_blobs:
+            if self.database_name == 'oracle':
                 stmt = """
-                INSERT INTO blob_chunk (zoid, chunk_num, chunk)
-                SELECT zoid, chunk_num, chunk
+                INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                SELECT zoid, :1, chunk_num, chunk
                 FROM temp_blob_chunk
                 """
-                cursor.execute(stmt)
+            else:
+                stmt = """
+                INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                SELECT zoid, %s, chunk_num, chunk
+                FROM temp_blob_chunk
+                """
+            cursor.execute(stmt, (tid,))
 
         stmt = """
         SELECT zoid FROM temp_store
@@ -978,23 +971,13 @@
 
     def generic_download_blob(self, cursor, oid, tid, filename):
         """Download a blob into a file."""
-        if self.keep_history:
-            stmt = """
-            SELECT chunk
-            FROM blob_chunk
-            WHERE zoid = %s
-                AND tid = %s
-                AND chunk_num = %s
-            """
-            use_tid = True
-        else:
-            stmt = """
-            SELECT chunk
-            FROM blob_chunk
-            WHERE zoid = %s
-                AND chunk_num = %s
-            """
-            use_tid = False
+        stmt = """
+        SELECT chunk
+        FROM blob_chunk
+        WHERE zoid = %s
+            AND tid = %s
+            AND chunk_num = %s
+        """
 
         use_base64 = False
         if self.database_name == 'postgresql':
@@ -1010,17 +993,13 @@
         try:
             chunk_num = 0
             while True:
-                if use_tid:
-                    params = (oid, tid, chunk_num)
-                else:
-                    params = (oid, chunk_num)
-                cursor.execute(stmt, params)
+                cursor.execute(stmt, (oid, tid, chunk_num))
                 rows = list(cursor)
                 if not rows:
-                    # No more chunks. Note: if there are no chunks at
-                    # all, then this method will not write a
-                    # file.  This is by design.
+                    # No more chunks.  Note: if there are no chunks at
+                    # all, then this method should not write a file.
                     break
+                assert len(rows) == 1
                 chunk = rows[0][0]
                 if use_base64:
                     chunk = decodestring(chunk)
@@ -1052,31 +1031,38 @@
         If serial is None, upload to the temporary table.
         """
         if tid is not None:
+            use_tid = True
             if self.keep_history:
                 delete_stmt = """
                 DELETE FROM blob_chunk
-                WHERE zoid = %s
-                    AND tid = %s
+                WHERE zoid = %s AND tid = %s
                 """
+                cursor.execute(delete_stmt, (oid, tid))
+
                 insert_stmt = """
                 INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
                 VALUES (%s, %s, %s, CHUNK)
                 """
-                use_tid = True
             else:
-                delete_stmt = "DELETE FROM blob_chunk WHERE zoid = %s"
+                delete_stmt = """
+                DELETE FROM blob_chunk
+                WHERE zoid = %s
+                """
+                cursor.execute(delete_stmt, (oid,))
+
                 insert_stmt = """
-                INSERT INTO blob_chunk (zoid, chunk_num, chunk)
-                VALUES (%s, %s, CHUNK)
+                INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                VALUES (%s, %s, %s, CHUNK)
                 """
-                use_tid = False
         else:
+            use_tid = False
             delete_stmt = "DELETE FROM temp_blob_chunk WHERE zoid = %s"
+            cursor.execute(delete_stmt, (oid,))
+
             insert_stmt = """
             INSERT INTO temp_blob_chunk (zoid, chunk_num, chunk)
             VALUES (%s, %s, CHUNK)
             """
-            use_tid = False
 
         use_base64 = False
         if self.database_name == 'postgresql':
@@ -1091,12 +1077,6 @@
                 delete_stmt = delete_stmt.replace('%s', ':%d' % n, 1)
                 insert_stmt = insert_stmt.replace('%s', ':%d' % n, 1)
 
-        if use_tid:
-            params = (oid, tid)
-        else:
-            params = (oid,)
-        cursor.execute(delete_stmt, params)
-
         f = open(filename, 'rb')
         try:
             chunk_num = 0

Modified: relstorage/trunk/relstorage/adapters/packundo.py
===================================================================
--- relstorage/trunk/relstorage/adapters/packundo.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/adapters/packundo.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -396,13 +396,21 @@
         -- Copy old states forward.
         INSERT INTO object_state (zoid, tid, prev_tid, md5, state)
         SELECT temp_undo.zoid, %(self_tid)s, current_object.tid,
-            prev.md5, prev.state
+            md5, state
         FROM temp_undo
             JOIN current_object ON (temp_undo.zoid = current_object.zoid)
-            LEFT JOIN object_state prev
-                ON (prev.zoid = temp_undo.zoid
-                    AND prev.tid = temp_undo.prev_tid);
+            LEFT JOIN object_state
+                ON (object_state.zoid = temp_undo.zoid
+                    AND object_state.tid = temp_undo.prev_tid);
 
+        -- Copy old blob chunks forward.
+        INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+        SELECT temp_undo.zoid, %(self_tid)s, chunk_num, chunk
+        FROM temp_undo
+            JOIN blob_chunk
+                ON (blob_chunk.zoid = temp_undo.zoid
+                    AND blob_chunk.tid = temp_undo.prev_tid);
+
         -- List the copied states.
         SELECT zoid, prev_tid FROM temp_undo
         """

Modified: relstorage/trunk/relstorage/adapters/schema.py
===================================================================
--- relstorage/trunk/relstorage/adapters/schema.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/adapters/schema.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -486,6 +486,7 @@
             zoid        BIGINT NOT NULL,
             chunk_num   BIGINT NOT NULL,
                         PRIMARY KEY (zoid, chunk_num),
+            tid         BIGINT NOT NULL,
             chunk       BYTEA NOT NULL
         );
         CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
@@ -507,6 +508,7 @@
             zoid        BIGINT NOT NULL,
             chunk_num   BIGINT NOT NULL,
                         PRIMARY KEY (zoid, chunk_num),
+            tid         BIGINT NOT NULL,
             chunk       LONGBLOB NOT NULL
         );
         CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
@@ -528,6 +530,7 @@
             zoid        NUMBER(20) NOT NULL,
             chunk_num   NUMBER(20) NOT NULL,
                         PRIMARY KEY (zoid, chunk_num),
+            tid         NUMBER(20) NOT NULL,
             chunk       BLOB
         );
         CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);

Modified: relstorage/trunk/relstorage/blobhelper.py
===================================================================
--- relstorage/trunk/relstorage/blobhelper.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/blobhelper.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -80,8 +80,9 @@
     # currently uncommitted transaction.
     _txn_blobs = None
 
-    def __init__(self, options, fshelper=None, cache_checker=None):
+    def __init__(self, options, adapter, fshelper=None, cache_checker=None):
         self.options = options
+        self.adapter = adapter
         self.blob_dir = options.blob_dir
         self.shared_blob_dir = options.shared_blob_dir
 
@@ -103,8 +104,8 @@
             cache_checker = BlobCacheChecker(options)
         self.cache_checker = cache_checker
 
-    def new_instance(self):
-        return BlobHelper(options=self.options,
+    def new_instance(self, adapter):
+        return BlobHelper(options=self.options, adapter=adapter,
             fshelper=self.fshelper, cache_checker=self.cache_checker)
 
     def clear_temp(self):
@@ -117,13 +118,13 @@
     def close(self):
         self.cache_checker.close()
 
-    def download_blob(self, adapter, cursor, oid, serial, filename):
+    def download_blob(self, cursor, oid, serial, filename):
         """Download a blob into a file"""
-        bytes = adapter.mover.download_blob(
+        bytes = self.adapter.mover.download_blob(
             cursor, u64(oid), u64(serial), filename)
         self.cache_checker.loaded(bytes)
 
-    def upload_blob(self, adapter, cursor, oid, serial, filename):
+    def upload_blob(self, cursor, oid, serial, filename):
         """Upload a blob from a file.
 
         If serial is None, upload to the temporary table.
@@ -132,9 +133,9 @@
             tid_int = u64(serial)
         else:
             tid_int = None
-        adapter.mover.upload_blob(cursor, u64(oid), tid_int, filename)
+        self.adapter.mover.upload_blob(cursor, u64(oid), tid_int, filename)
 
-    def loadBlob(self, adapter, cursor, oid, serial):
+    def loadBlob(self, cursor, oid, serial):
         # Load a blob.  If it isn't present and we have a shared blob
         # directory, then assume that it doesn't exist on the server
         # and return None.
@@ -168,7 +169,7 @@
             if os.path.exists(blob_filename):
                 return _accessed(blob_filename)
 
-            self.download_blob(adapter, cursor, oid, serial, blob_filename)
+            self.download_blob(cursor, oid, serial, blob_filename)
 
             if os.path.exists(blob_filename):
                 return _accessed(blob_filename)
@@ -178,8 +179,8 @@
         finally:
             lock.close()
 
-    def openCommittedBlobFile(self, adapter, cursor, oid, serial, blob=None):
-        blob_filename = self.loadBlob(adapter, cursor, oid, serial)
+    def openCommittedBlobFile(self, cursor, oid, serial, blob=None):
+        blob_filename = self.loadBlob(cursor, oid, serial)
         try:
             if blob is None:
                 return open(blob_filename, 'rb')
@@ -198,7 +199,7 @@
                     # We're using a server shared cache.  If the file isn't
                     # here, it's not anywhere.
                     raise POSException.POSKeyError("No blob file", oid, serial)
-                self.download_blob(adapter, cursor, oid, serial, blob_filename)
+                self.download_blob(cursor, oid, serial, blob_filename)
                 if not os.path.exists(blob_filename):
                     raise POSException.POSKeyError("No blob file", oid, serial)
 
@@ -213,7 +214,7 @@
     def temporaryDirectory(self):
         return self.fshelper.temp_dir
 
-    def storeBlob(self, adapter, cursor, store,
+    def storeBlob(self, cursor, store_func,
             oid, serial, data, blobfilename, version, txn):
         """Storage API: store a blob object."""
         assert not version
@@ -233,9 +234,10 @@
         os.remove(target[:-1])
         self._add_blob_to_transaction(oid, target)
 
-        store(oid, serial, data, '', txn)
+        store_func(oid, serial, data, '', txn)
+
         if not self.shared_blob_dir:
-            self.upload_blob(adapter, cursor, oid, None, target)
+            self.upload_blob(cursor, oid, None, target)
 
     def _add_blob_to_transaction(self, oid, filename):
         if self._txn_blobs is None:
@@ -246,13 +248,13 @@
                 ZODB.blob.remove_committed(old_filename)
         self._txn_blobs[oid] = filename
 
-    def restoreBlob(self, adapter, cursor, oid, serial, blobfilename):
+    def restoreBlob(self, cursor, oid, serial, blobfilename):
         if self.shared_blob_dir:
             self.fshelper.getPathForOID(oid, create=True)
             targetname = self.fshelper.getBlobFilename(oid, serial)
             ZODB.blob.rename_or_copy_blob(blobfilename, targetname)
         else:
-            self.upload_blob(adapter, cursor, oid, serial, blobfilename)
+            self.upload_blob(cursor, oid, serial, blobfilename)
 
     def copy_undone(self, copied, tid):
         """After an undo operation, copy the matching blobs forward.

Modified: relstorage/trunk/relstorage/storage.py
===================================================================
--- relstorage/trunk/relstorage/storage.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/storage.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -190,7 +190,7 @@
         if blobhelper is not None:
             self.blobhelper = blobhelper
         elif options.blob_dir:
-            self.blobhelper = BlobHelper(options=options)
+            self.blobhelper = BlobHelper(options=options, adapter=adapter)
 
     def new_instance(self):
         """Creates and returns another storage instance.
@@ -200,7 +200,7 @@
         adapter = self._adapter.new_instance()
         cache = self._cache.new_instance()
         if self.blobhelper is not None:
-            blobhelper = self.blobhelper.new_instance()
+            blobhelper = self.blobhelper.new_instance(adapter=adapter)
         else:
             blobhelper = None
         other = RelStorage(adapter=adapter, name=self.__name__,
@@ -1253,7 +1253,7 @@
         self._lock_acquire()
         try:
             cursor = self._load_cursor
-            return self.blobhelper.loadBlob(self._adapter, cursor, oid, serial)
+            return self.blobhelper.loadBlob(cursor, oid, serial)
         finally:
             self._lock_release()
 
@@ -1272,7 +1272,7 @@
         try:
             cursor = self._load_cursor
             return self.blobhelper.openCommittedBlobFile(
-                self._adapter, cursor, oid, serial, blob=blob)
+                cursor, oid, serial, blob=blob)
         finally:
             self._lock_release()
 
@@ -1298,7 +1298,7 @@
         try:
             self._batcher.flush()
             cursor = self._store_cursor
-            self.blobhelper.storeBlob(self._adapter, cursor, self.store,
+            self.blobhelper.storeBlob(cursor, self.store,
                 oid, serial, data, blobfilename, version, txn)
         finally:
             self._lock_release()
@@ -1314,8 +1314,7 @@
         try:
             self._batcher.flush()
             cursor = self._store_cursor
-            self.blobhelper.restoreBlob(
-                self._adapter, cursor, oid, serial, blobfilename)
+            self.blobhelper.restoreBlob(cursor, oid, serial, blobfilename)
         finally:
             self._lock_release()
 

Modified: relstorage/trunk/relstorage/tests/blob/blob_transaction.txt
===================================================================
--- relstorage/trunk/relstorage/tests/blob/blob_transaction.txt	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/tests/blob/blob_transaction.txt	2010-10-20 00:01:00 UTC (rev 117781)
@@ -314,7 +314,7 @@
 
 And we shouldn't be able to read the data that we saved:
 
-    >>> blob_storage.loadBlob(blob._p_oid, s1)
+    >>> blob_storage.loadBlob(blob._p_oid, s1)
     Traceback (most recent call last):
     ...
     POSKeyError: 'No blob file'

Modified: relstorage/trunk/relstorage/tests/testoracle.py
===================================================================
--- relstorage/trunk/relstorage/tests/testoracle.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/tests/testoracle.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -165,38 +165,47 @@
     else:
         from relstorage.tests.blob.testblob import storage_reusable_suite
         dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
-        for keep_history in (False, True):
-            def create_storage(name, blob_dir, keep_history=keep_history):
-                from relstorage.storage import RelStorage
-                from relstorage.adapters.oracle import OracleAdapter
-                db = db_names[name]
-                if not keep_history:
-                    db += '_hf'
-                adapter = OracleAdapter(
-                    user=db,
-                    password='relstoragetest',
-                    dsn=dsn,
-                    options=Options(keep_history=keep_history),
+        for shared_blob_dir in (False, True):
+            for keep_history in (False, True):
+                def create_storage(name, blob_dir,
+                        shared_blob_dir=shared_blob_dir,
+                        keep_history=keep_history):
+                    from relstorage.storage import RelStorage
+                    from relstorage.adapters.oracle import OracleAdapter
+                    db = db_names[name]
+                    if not keep_history:
+                        db += '_hf'
+                    options = Options(
+                        keep_history=keep_history,
+                        shared_blob_dir=shared_blob_dir,
+                        blob_dir=os.path.abspath(blob_dir),
                     )
-                storage = RelStorage(adapter, name=name, create=True,
-                    blob_dir=os.path.abspath(blob_dir))
-                storage.zap_all()
-                return storage
+                    adapter = OracleAdapter(
+                        user=db,
+                        password='relstoragetest',
+                        dsn=dsn,
+                        options=options,
+                    )
+                    storage = RelStorage(adapter, name=name, options=options)
+                    storage.zap_all()
+                    return storage
 
-            if keep_history:
-                prefix = 'HPOracle'
-                pack_test_name = 'blob_packing.txt'
-            else:
-                prefix = 'HFOracle'
-                pack_test_name = 'blob_packing_history_free.txt'
+                prefix = 'Oracle%s%s' % (
+                    (shared_blob_dir and 'Shared' or 'Unshared'),
+                    (keep_history and 'WithHistory' or 'NoHistory'),
+                )
+                if keep_history:
+                    pack_test_name = 'blob_packing.txt'
+                else:
+                    pack_test_name = 'blob_packing_history_free.txt'
 
-            suite.addTest(storage_reusable_suite(
-                prefix, create_storage,
-                test_blob_storage_recovery=True,
-                test_packing=True,
-                test_undo=keep_history,
-                pack_test_name=pack_test_name,
-                ))
+                suite.addTest(storage_reusable_suite(
+                    prefix, create_storage,
+                    test_blob_storage_recovery=True,
+                    test_packing=True,
+                    test_undo=keep_history,
+                    pack_test_name=pack_test_name,
+                    ))
 
     return suite
 

Modified: relstorage/trunk/relstorage/tests/testpostgresql.py
===================================================================
--- relstorage/trunk/relstorage/tests/testpostgresql.py	2010-10-19 21:58:48 UTC (rev 117780)
+++ relstorage/trunk/relstorage/tests/testpostgresql.py	2010-10-20 00:01:00 UTC (rev 117781)
@@ -153,36 +153,44 @@
         pass
     else:
         from relstorage.tests.blob.testblob import storage_reusable_suite
-        for keep_history in (False, True):
-            def create_storage(name, blob_dir, keep_history=keep_history):
-                from relstorage.storage import RelStorage
-                from relstorage.adapters.postgresql import PostgreSQLAdapter
-                db = db_names[name]
-                if not keep_history:
-                    db += '_hf'
-                dsn = ('dbname=%s user=relstoragetest '
-                        'password=relstoragetest' % db)
-                adapter = PostgreSQLAdapter(
-                    dsn=dsn, options=Options(keep_history=keep_history))
-                storage = RelStorage(adapter, name=name, create=True,
-                    blob_dir=os.path.abspath(blob_dir))
-                storage.zap_all()
-                return storage
+        for shared_blob_dir in (False, True):
+            for keep_history in (False, True):
+                def create_storage(name, blob_dir,
+                        shared_blob_dir=shared_blob_dir,
+                        keep_history=keep_history):
+                    from relstorage.storage import RelStorage
+                    from relstorage.adapters.postgresql import PostgreSQLAdapter
+                    db = db_names[name]
+                    if not keep_history:
+                        db += '_hf'
+                    dsn = ('dbname=%s user=relstoragetest '
+                            'password=relstoragetest' % db)
+                    options = Options(
+                        keep_history=keep_history,
+                        shared_blob_dir=shared_blob_dir,
+                        blob_dir=os.path.abspath(blob_dir),
+                    )
+                    adapter = PostgreSQLAdapter(dsn=dsn, options=options)
+                    storage = RelStorage(adapter, name=name, options=options)
+                    storage.zap_all()
+                    return storage
 
-            if keep_history:
-                prefix = 'HPPostgreSQL'
-                pack_test_name = 'blob_packing.txt'
-            else:
-                prefix = 'HFPostgreSQL'
-                pack_test_name = 'blob_packing_history_free.txt'
+                prefix = 'PostgreSQL%s%s' % (
+                    (shared_blob_dir and 'Shared' or 'Unshared'),
+                    (keep_history and 'WithHistory' or 'NoHistory'),
+                )
+                if keep_history:
+                    pack_test_name = 'blob_packing.txt'
+                else:
+                    pack_test_name = 'blob_packing_history_free.txt'
 
-            suite.addTest(storage_reusable_suite(
-                prefix, create_storage,
-                test_blob_storage_recovery=True,
-                test_packing=True,
-                test_undo=keep_history,
-                pack_test_name=pack_test_name,
-                ))
+                suite.addTest(storage_reusable_suite(
+                    prefix, create_storage,
+                    test_blob_storage_recovery=True,
+                    test_packing=True,
+                    test_undo=keep_history,
+                    pack_test_name=pack_test_name,
+                    ))
 
     return suite
 



More information about the checkins mailing list