[Checkins] SVN: relstorage/trunk/ Checkpoint: optionally put blobs in the database. Some tests pass. :-)

Shane Hathaway shane at hathawaymix.org
Tue Oct 19 17:58:49 EDT 2010


Log message for revision 117780:
  Checkpoint: optionally put blobs in the database.  Some tests pass. :-)
  

Changed:
  U   relstorage/trunk/CHANGES.txt
  U   relstorage/trunk/README.txt
  U   relstorage/trunk/relstorage/adapters/interfaces.py
  U   relstorage/trunk/relstorage/adapters/mover.py
  U   relstorage/trunk/relstorage/adapters/mysql.py
  U   relstorage/trunk/relstorage/adapters/oracle.py
  U   relstorage/trunk/relstorage/adapters/postgresql.py
  U   relstorage/trunk/relstorage/adapters/schema.py
  A   relstorage/trunk/relstorage/blobhelper.py
  U   relstorage/trunk/relstorage/component.xml
  U   relstorage/trunk/relstorage/options.py
  U   relstorage/trunk/relstorage/storage.py
  U   relstorage/trunk/relstorage/tests/RecoveryStorage.py
  U   relstorage/trunk/relstorage/tests/testmysql.py
  D   relstorage/trunk/relstorage/util.py
  U   relstorage/trunk/setup.py

-=-
Modified: relstorage/trunk/CHANGES.txt
===================================================================
--- relstorage/trunk/CHANGES.txt	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/CHANGES.txt	2010-10-19 21:58:48 UTC (rev 117780)
@@ -2,8 +2,10 @@
 Next Release
 ------------
 
-- ...
+- Added an option to store ZODB blobs in the database.
 
+- Require setuptools or distribute.  Plain distutils is not sufficient.
+
 1.4.0 (2010-09-30)
 ------------------
 

Modified: relstorage/trunk/README.txt
===================================================================
--- relstorage/trunk/README.txt	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/README.txt	2010-10-19 21:58:48 UTC (rev 117780)
@@ -337,12 +337,43 @@
         If true, only reads may be executed against the storage.
 
 ``blob-dir``
-        If supplied, the storage will provide blob support; this
+        If supplied, the storage will provide ZODB blob support; this
         parameter specifies the name of the directory to hold blob data.
         The directory will be created if it does not exist. If no value
         (or an empty value) is provided, then no blob support will be
         provided.
 
+``shared-blob-dir``
+        If true (the default), the blob directory is assumed to be
+        shared among all clients using NFS or similar; blob data will
+        be stored only on the filesystem and not in the database. If
+        false, blob data is stored in the relational database and the
+        blob directory holds a cache of blobs. When this parameter is
+        false, the blob directory should not be shared among clients.
+
+``blob-cache-size``
+        Maximum size of the blob cache, in bytes.  If empty (the default),
+        the cache size isn't checked and the blob directory will
+        grow without bounds.
+
+        This option is ignored if shared-blob-dir is true.
+
+``blob-cache-size-check``
+        Blob cache check size as percent of blob-cache-size. The blob
+        cache size will be checked when this many bytes have been
+        loaded into the cache. Defaults to 10% of the blob cache size.
+
+        This option is ignored if shared-blob-dir is true.
+
+``blob-chunk-size``
+        When ZODB blobs are stored in the database, RelStorage breaks
+        them into chunks to minimize the impact on RAM.  This
+        parameter specifies the chunk size for new blobs.  The
+        default is 1048576 (1 megabyte).
+
+        This option is ignored if shared-blob-dir is true.
+
 ``keep-history``
         If this parameter is set to true (the default), the adapter
         will create and use a history-preserving database schema
@@ -463,10 +494,12 @@
         The default is to disable memcached integration.
 
 ``cache-module-name``
-        Specifies which Python memcache module to use.  The default is
-        "memcache", a pure Python module.  An alternative module is
-        "relstorage.pylibmc_wrapper".  This setting has no effect
-        unless cache-servers is set.
+        Specifies which Python memcache module to use. The default is
+        "relstorage.pylibmc_wrapper", which requires pylibmc. An
+        alternative module is "memcache", a pure Python module, but the
+        current version of memcache (1.45) has bugs that make it
+        unreliable. This setting has no effect unless cache-servers is
+        set.
 
 ``cache-prefix``
         The prefix for all keys in the cache.  All clients using a

Modified: relstorage/trunk/relstorage/adapters/interfaces.py
===================================================================
--- relstorage/trunk/relstorage/adapters/interfaces.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/interfaces.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -253,7 +253,7 @@
         This happens after conflict resolution.
         """
 
-    def move_from_temp(cursor, tid):
+    def move_from_temp(cursor, tid, txn_has_blobs):
         """Moved the temporarily stored objects to permanent storage.
 
         Returns the list of oids stored.
@@ -265,7 +265,19 @@
         tid is the integer tid of the transaction being committed.
         """
 
+    def download_blob(cursor, oid, tid, filename):
+        """Download a blob into a file.
 
+        Returns the size of the blob file in bytes.
+        """
+
+    def upload_blob(cursor, oid, tid, filename):
+        """Upload a blob from a file.
+
+        If tid is None, upload to the temporary table.
+        """
+
+
 class IOIDAllocator(Interface):
     """Allocate OIDs and control future allocation"""
 

Modified: relstorage/trunk/relstorage/adapters/mover.py
===================================================================
--- relstorage/trunk/relstorage/adapters/mover.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/mover.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -54,13 +54,16 @@
         'replace_temp',
         'move_from_temp',
         'update_current',
+        'download_blob',
+        'upload_blob',
         )
 
-    def __init__(self, database_name, keep_history, runner=None,
+    def __init__(self, database_name, options, runner=None,
             Binary=None, inputsizes=None, version_detector=None):
         # The inputsizes parameter is for Oracle only.
         self.database_name = database_name
-        self.keep_history = keep_history
+        self.keep_history = options.keep_history
+        self.blob_chunk_size = options.blob_chunk_size
         self.runner = runner
         self.Binary = Binary
         self.inputsizes = inputsizes
@@ -373,7 +376,7 @@
 
 
     def postgresql_on_store_opened(self, cursor, restart=False):
-        """Create the temporary table for storing objects"""
+        """Create the temporary tables for storing objects"""
         # note that the md5 column is not used if self.keep_history == False.
         stmt = """
         CREATE TEMPORARY TABLE temp_store (
@@ -382,17 +385,25 @@
             md5         CHAR(32),
             state       BYTEA
         ) ON COMMIT DROP;
-        CREATE UNIQUE INDEX temp_store_zoid ON temp_store (zoid)
+        CREATE UNIQUE INDEX temp_store_zoid ON temp_store (zoid);
+
+        CREATE TEMPORARY TABLE temp_blob_chunk (
+            zoid        BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+            chunk       BYTEA
+        ) ON COMMIT DROP;
+        CREATE UNIQUE INDEX temp_blob_chunk_key
+            ON temp_blob_chunk (zoid, chunk_num);
         """
         cursor.execute(stmt)
 
     def mysql_on_store_opened(self, cursor, restart=False):
         """Create the temporary table for storing objects"""
         if restart:
-            stmt = """
-            DROP TEMPORARY TABLE IF EXISTS temp_store
-            """
+            stmt = "DROP TEMPORARY TABLE IF EXISTS temp_store"
             cursor.execute(stmt)
+            stmt = "DROP TEMPORARY TABLE IF EXISTS temp_blob_chunk"
+            cursor.execute(stmt)
 
         # note that the md5 column is not used if self.keep_history == False.
         stmt = """
@@ -405,6 +416,16 @@
         """
         cursor.execute(stmt)
 
+        stmt = """
+        CREATE TEMPORARY TABLE temp_blob_chunk (
+            zoid        BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+                        PRIMARY KEY (zoid, chunk_num),
+            chunk       LONGBLOB
+        ) ENGINE MyISAM
+        """
+        cursor.execute(stmt)
+
     # no store connection initialization needed for Oracle
     oracle_on_store_opened = None
 
@@ -789,7 +810,7 @@
 
 
 
-    def generic_move_from_temp(self, cursor, tid):
+    def generic_move_from_temp(self, cursor, tid, txn_has_blobs):
         """Moved the temporarily stored objects to permanent storage.
 
         Returns the list of oids stored.
@@ -809,6 +830,21 @@
                 """
             cursor.execute(stmt, (tid,))
 
+            if txn_has_blobs:
+                if self.database_name == 'oracle':
+                    stmt = """
+                    INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                    SELECT zoid, :1, chunk_num, chunk
+                    FROM temp_blob_chunk
+                    """
+                else:
+                    stmt = """
+                    INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                    SELECT zoid, %s, chunk_num, chunk
+                    FROM temp_blob_chunk
+                    """
+                cursor.execute(stmt, (tid,))
+
         else:
             if self.database_name == 'mysql':
                 stmt = """
@@ -839,6 +875,20 @@
                     """
                 cursor.execute(stmt, (tid,))
 
+            if txn_has_blobs:
+                stmt = """
+                DELETE FROM blob_chunk
+                WHERE zoid IN (SELECT zoid FROM temp_store)
+                """
+                cursor.execute(stmt)
+
+                stmt = """
+                INSERT INTO blob_chunk (zoid, chunk_num, chunk)
+                SELECT zoid, chunk_num, chunk
+                FROM temp_blob_chunk
+                """
+                cursor.execute(stmt)
+
         stmt = """
         SELECT zoid FROM temp_store
         """
@@ -923,3 +973,150 @@
         """
         cursor.execute(stmt, (tid,))
 
+
+
+
+    def generic_download_blob(self, cursor, oid, tid, filename):
+        """Download a blob into a file."""
+        if self.keep_history:
+            stmt = """
+            SELECT chunk
+            FROM blob_chunk
+            WHERE zoid = %s
+                AND tid = %s
+                AND chunk_num = %s
+            """
+            use_tid = True
+        else:
+            stmt = """
+            SELECT chunk
+            FROM blob_chunk
+            WHERE zoid = %s
+                AND chunk_num = %s
+            """
+            use_tid = False
+
+        use_base64 = False
+        if self.database_name == 'postgresql':
+            use_base64 = True
+            stmt = stmt.replace(
+                "SELECT chunk", "SELECT encode(chunk, 'base64')")
+        elif self.database_name == 'oracle':
+            for n in (1, 2, 3):
+                stmt = stmt.replace('%s', ':%d' % n, 1)
+
+        f = None
+        bytes = 0
+        try:
+            chunk_num = 0
+            while True:
+                if use_tid:
+                    params = (oid, tid, chunk_num)
+                else:
+                    params = (oid, chunk_num)
+                cursor.execute(stmt, params)
+                rows = list(cursor)
+                if not rows:
+                    # No more chunks. Note: if there are no chunks at
+                    # all, then this method will not write a
+                    # file.  This is by design.
+                    break
+                chunk = rows[0][0]
+                if use_base64:
+                    chunk = decodestring(chunk)
+                if f is None:
+                    f = open(filename, 'wb')
+                f.write(chunk)
+                bytes += len(chunk)
+                chunk_num += 1
+        except:
+            if f is not None:
+                f.close()
+                os.remove(filename)
+            raise
+
+        if f is not None:
+            f.close()
+        return bytes
+
+    mysql_download_blob = generic_download_blob
+    postgresql_download_blob = generic_download_blob
+    oracle_download_blob = generic_download_blob
+
+
+
+
+    def generic_upload_blob(self, cursor, oid, tid, filename):
+        """Upload a blob from a file.
+
+        If tid is None, upload to the temporary table.
+        """
+        if tid is not None:
+            if self.keep_history:
+                delete_stmt = """
+                DELETE FROM blob_chunk
+                WHERE zoid = %s
+                    AND tid = %s
+                """
+                insert_stmt = """
+                INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
+                VALUES (%s, %s, %s, CHUNK)
+                """
+                use_tid = True
+            else:
+                delete_stmt = "DELETE FROM blob_chunk WHERE zoid = %s"
+                insert_stmt = """
+                INSERT INTO blob_chunk (zoid, chunk_num, chunk)
+                VALUES (%s, %s, CHUNK)
+                """
+                use_tid = False
+        else:
+            delete_stmt = "DELETE FROM temp_blob_chunk WHERE zoid = %s"
+            insert_stmt = """
+            INSERT INTO temp_blob_chunk (zoid, chunk_num, chunk)
+            VALUES (%s, %s, CHUNK)
+            """
+            use_tid = False
+
+        use_base64 = False
+        if self.database_name == 'postgresql':
+            use_base64 = True
+            insert_stmt = insert_stmt.replace(
+                "CHUNK", "decode(%s, 'base64')", 1)
+        else:
+            insert_stmt = insert_stmt.replace("CHUNK", "%s")
+
+        if self.database_name == 'oracle':
+            for n in (1, 2, 3, 4):
+                delete_stmt = delete_stmt.replace('%s', ':%d' % n, 1)
+                insert_stmt = insert_stmt.replace('%s', ':%d' % n, 1)
+
+        if use_tid:
+            params = (oid, tid)
+        else:
+            params = (oid,)
+        cursor.execute(delete_stmt, params)
+
+        f = open(filename, 'rb')
+        try:
+            chunk_num = 0
+            while True:
+                chunk = f.read(self.blob_chunk_size)
+                if not chunk and chunk_num > 0:
+                    # EOF.  Note that we always write at least one
+                    # chunk, even if the blob file is empty.
+                    break
+                if use_base64:
+                    chunk = encodestring(chunk)
+                if use_tid:
+                    params = (oid, tid, chunk_num, chunk)
+                else:
+                    params = (oid, chunk_num, chunk)
+                cursor.execute(insert_stmt, params)
+                chunk_num += 1
+        finally:
+            f.close()
+
+    mysql_upload_blob = generic_upload_blob
+    postgresql_upload_blob = generic_upload_blob
+    oracle_upload_blob = generic_upload_blob
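
A quick sanity check on the chunking logic in generic_upload_blob above: the file is read in blob_chunk_size pieces and at least one row is always written, so the number of blob_chunk rows per blob is predictable. A small, hypothetical helper:

    def expected_chunk_count(blob_size, chunk_size=1048576):
        # Mirrors generic_upload_blob: an empty blob still gets one
        # (empty) chunk; otherwise round the size up to whole chunks.
        if blob_size == 0:
            return 1
        return (blob_size + chunk_size - 1) // chunk_size

    assert expected_chunk_count(0) == 1
    assert expected_chunk_count(1048576) == 1            # exactly one chunk
    assert expected_chunk_count(10 * 1048576 + 1) == 11  # 10 MB plus 1 byte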

Modified: relstorage/trunk/relstorage/adapters/mysql.py
===================================================================
--- relstorage/trunk/relstorage/adapters/mysql.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/mysql.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -111,7 +111,7 @@
             )
         self.mover = ObjectMover(
             database_name='mysql',
-            keep_history=self.keep_history,
+            options=options,
             Binary=MySQLdb.Binary,
             )
         self.connmanager.set_on_store_opened(self.mover.on_store_opened)

Modified: relstorage/trunk/relstorage/adapters/oracle.py
===================================================================
--- relstorage/trunk/relstorage/adapters/oracle.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/oracle.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -93,7 +93,7 @@
             )
         self.mover = ObjectMover(
             database_name='oracle',
-            keep_history=self.keep_history,
+            options=options,
             runner=self.runner,
             Binary=cx_Oracle.Binary,
             inputsizes={

Modified: relstorage/trunk/relstorage/adapters/postgresql.py
===================================================================
--- relstorage/trunk/relstorage/adapters/postgresql.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/postgresql.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -80,7 +80,7 @@
             )
         self.mover = ObjectMover(
             database_name='postgresql',
-            keep_history=self.keep_history,
+            options=options,
             runner=self.runner,
             version_detector=self.version_detector,
             )

Modified: relstorage/trunk/relstorage/adapters/schema.py
===================================================================
--- relstorage/trunk/relstorage/adapters/schema.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/adapters/schema.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -19,7 +19,7 @@
 import re
 import time
 
-relstorage_op_version = '1.4'
+relstorage_op_version = '1.5'
 
 history_preserving_schema = """
 
@@ -80,15 +80,16 @@
     oracle:
         CREATE SEQUENCE zoid_seq;
 
-# object_state: All object states in all transactions. Note that md5
-# and state can be null to represent object uncreation.
+# object_state and blob_chunk: All object and blob states in all
+# transactions. Note that md5 and state can be null to represent object
+# uncreation.
 
     postgresql:
         CREATE TABLE object_state (
             zoid        BIGINT NOT NULL,
             tid         BIGINT NOT NULL REFERENCES transaction
                         CHECK (tid > 0),
-            PRIMARY KEY (zoid, tid),
+                        PRIMARY KEY (zoid, tid),
             prev_tid    BIGINT NOT NULL REFERENCES transaction,
             md5         CHAR(32),
             state       BYTEA
@@ -96,25 +97,51 @@
         CREATE INDEX object_state_tid ON object_state (tid);
         CREATE INDEX object_state_prev_tid ON object_state (prev_tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        BIGINT NOT NULL,
+            tid         BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+                        PRIMARY KEY (zoid, tid, chunk_num),
+            chunk       BYTEA NOT NULL
+        );
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid, tid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid, tid)
+            REFERENCES object_state (zoid, tid)
+            ON DELETE CASCADE;
+
     mysql:
         CREATE TABLE object_state (
             zoid        BIGINT NOT NULL,
             tid         BIGINT NOT NULL REFERENCES transaction,
-            PRIMARY KEY (zoid, tid),
+                        PRIMARY KEY (zoid, tid),
+                        CHECK (tid > 0),
             prev_tid    BIGINT NOT NULL REFERENCES transaction,
             md5         CHAR(32) CHARACTER SET ascii,
-            state       LONGBLOB,
-            CHECK (tid > 0)
+            state       LONGBLOB
         ) ENGINE = InnoDB;
         CREATE INDEX object_state_tid ON object_state (tid);
         CREATE INDEX object_state_prev_tid ON object_state (prev_tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        BIGINT NOT NULL,
+            tid         BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+                        PRIMARY KEY (zoid, tid, chunk_num),
+            chunk       LONGBLOB NOT NULL
+        ) ENGINE = InnoDB;
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid, tid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid, tid)
+            REFERENCES object_state (zoid, tid)
+            ON DELETE CASCADE;
+
     oracle:
         CREATE TABLE object_state (
             zoid        NUMBER(20) NOT NULL,
-            tid         NUMBER(20) NOT NULL REFERENCES transaction
-                        CHECK (tid > 0),
-            PRIMARY KEY (zoid, tid),
+            tid         NUMBER(20) NOT NULL REFERENCES transaction,
+                        PRIMARY KEY (zoid, tid),
+                        CONSTRAINT tid_min CHECK (tid > 0),
             prev_tid    NUMBER(20) NOT NULL REFERENCES transaction,
             md5         CHAR(32),
             state       BLOB
@@ -122,13 +149,27 @@
         CREATE INDEX object_state_tid ON object_state (tid);
         CREATE INDEX object_state_prev_tid ON object_state (prev_tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        NUMBER(20) NOT NULL,
+            tid         NUMBER(20) NOT NULL,
+            chunk_num   NUMBER(20) NOT NULL,
+                        PRIMARY KEY (zoid, tid, chunk_num),
+            chunk       BLOB
+        );
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid, tid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid, tid)
+            REFERENCES object_state (zoid, tid)
+            ON DELETE CASCADE;
+
 # current_object: Pointers to the current object state
 
     postgresql:
         CREATE TABLE current_object (
             zoid        BIGINT NOT NULL PRIMARY KEY,
             tid         BIGINT NOT NULL,
-            FOREIGN KEY (zoid, tid) REFERENCES object_state
+                        FOREIGN KEY (zoid, tid)
+                            REFERENCES object_state (zoid, tid)
         );
         CREATE INDEX current_object_tid ON current_object (tid);
 
@@ -136,7 +177,8 @@
         CREATE TABLE current_object (
             zoid        BIGINT NOT NULL PRIMARY KEY,
             tid         BIGINT NOT NULL,
-            FOREIGN KEY (zoid, tid) REFERENCES object_state (zoid, tid)
+                        FOREIGN KEY (zoid, tid)
+                            REFERENCES object_state (zoid, tid)
         ) ENGINE = InnoDB;
         CREATE INDEX current_object_tid ON current_object (tid);
 
@@ -144,7 +186,8 @@
         CREATE TABLE current_object (
             zoid        NUMBER(20) NOT NULL PRIMARY KEY,
             tid         NUMBER(20) NOT NULL,
-            FOREIGN KEY (zoid, tid) REFERENCES object_state
+                        FOREIGN KEY (zoid, tid)
+                            REFERENCES object_state (zoid, tid)
         );
         CREATE INDEX current_object_tid ON current_object (tid);
 
@@ -288,9 +331,16 @@
             zoid        NUMBER(20) NOT NULL PRIMARY KEY,
             prev_tid    NUMBER(20) NOT NULL,
             md5         CHAR(32),
-            state       BLOB
+            state       BLOB,
+            blobdata    BLOB
         ) ON COMMIT DELETE ROWS;
 
+        CREATE GLOBAL TEMPORARY TABLE temp_blob_chunk (
+            zoid        NUMBER(20) NOT NULL,
+            chunk_num   NUMBER(20) NOT NULL,
+            chunk       BLOB
+        ) ON COMMIT DELETE ROWS;
+
         # Temporary state during packing: a list of objects
         # whose references need to be examined.
         CREATE GLOBAL TEMPORARY TABLE temp_pack_visit (
@@ -422,7 +472,7 @@
     oracle:
         CREATE SEQUENCE zoid_seq;
 
-# object_state: All object states in all transactions.
+# object_state and blob_chunk: All object states in all transactions.
 
     postgresql:
         CREATE TABLE object_state (
@@ -432,23 +482,60 @@
         );
         CREATE INDEX object_state_tid ON object_state (tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+                        PRIMARY KEY (zoid, chunk_num),
+            chunk       BYTEA NOT NULL
+        );
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid)
+            REFERENCES object_state (zoid)
+            ON DELETE CASCADE;
+
     mysql:
         CREATE TABLE object_state (
             zoid        BIGINT NOT NULL PRIMARY KEY,
             tid         BIGINT NOT NULL,
-            state       LONGBLOB,
-            CHECK (tid > 0)
+                        CHECK (tid > 0),
+            state       LONGBLOB
         ) ENGINE = InnoDB;
         CREATE INDEX object_state_tid ON object_state (tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        BIGINT NOT NULL,
+            chunk_num   BIGINT NOT NULL,
+                        PRIMARY KEY (zoid, chunk_num),
+            chunk       LONGBLOB NOT NULL
+        );
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid)
+            REFERENCES object_state (zoid)
+            ON DELETE CASCADE;
+
     oracle:
         CREATE TABLE object_state (
             zoid        NUMBER(20) NOT NULL PRIMARY KEY,
             tid         NUMBER(20) NOT NULL,
+                        CONSTRAINT tid_min CHECK (tid > 0),
             state       BLOB
         );
         CREATE INDEX object_state_tid ON object_state (tid);
 
+        CREATE TABLE blob_chunk (
+            zoid        NUMBER(20) NOT NULL,
+            chunk_num   NUMBER(20) NOT NULL,
+                        PRIMARY KEY (zoid, chunk_num),
+            chunk       BLOB
+        );
+        CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
+        ALTER TABLE blob_chunk ADD CONSTRAINT blob_chunk_fk
+            FOREIGN KEY (zoid)
+            REFERENCES object_state (zoid)
+            ON DELETE CASCADE;
+
 # object_ref: A list of referenced OIDs from each object_state. This
 # table is populated as needed during packing.
 
@@ -544,6 +631,12 @@
             state       BLOB
         ) ON COMMIT DELETE ROWS;
 
+        CREATE GLOBAL TEMPORARY TABLE temp_blob_chunk (
+            zoid        NUMBER(20) NOT NULL,
+            chunk_num   NUMBER(20) NOT NULL,
+            chunk       BLOB
+        ) ON COMMIT DELETE ROWS;
+
         # Temporary state during packing: a list of objects
         # whose references need to be examined.
         CREATE GLOBAL TEMPORARY TABLE temp_pack_visit (
@@ -629,6 +722,20 @@
     return '\n'.join(res)
 
 
+def filter_statements(script, expr):
+    res = []
+    match = False
+    for line in script.splitlines():
+        line = line.strip()
+        if not match and expr.search(line) is not None:
+            match = True
+        if match:
+            res.append(line)
+            if line.rstrip().endswith(';'):
+                match = False
+    return '\n'.join(res)
+
+
 class AbstractSchemaInstaller(object):
 
     # Keep this list in the same order as the schema scripts
@@ -638,6 +745,7 @@
         'transaction',
         'new_oid',
         'object_state',
+        'blob_chunk',
         'current_object',
         'object_ref',
         'object_refs_added',
@@ -645,9 +753,10 @@
         'pack_state',
         'pack_state_tid',
         'temp_store',
+        'temp_blob_chunk',
         'temp_pack_visit',
         'temp_undo',
-        )
+    )
 
     database_name = None  # provided by a subclass
 
@@ -683,6 +792,7 @@
                 self.create(cursor)
             else:
                 self.check_compatibility(cursor, tables)
+                self.update_schema(cursor, tables)
         self.connmanager.open_and_call(callback)
 
     def check_compatibility(self, cursor, tables):
@@ -692,15 +802,25 @@
                     "Schema mismatch: a history-preserving adapter "
                     "can not connect to a history-free database. "
                     "If you need to convert, use the zodbconvert utility."
-                    )
+                )
         else:
             if 'transaction' in tables and 'current_object' in tables:
                 raise StorageError(
                     "Schema mismatch: a history-free adapter "
                     "can not connect to a history-preserving database. "
                     "If you need to convert, use the zodbconvert utility."
-                    )
+                )
 
+    def update_schema(self, cursor, tables):
+        if not 'blob_chunk' in tables:
+            # Add the blob_chunk table (RelStorage 1.5+)
+            script = filter_script(
+                self.schema_script, self.database_name)
+            expr = (r'(CREATE|ALTER)\s+(GLOBAL TEMPORARY\s+)?(TABLE|INDEX)'
+                    r'\s+(temp_)?blob_chunk')
+            script = filter_statements(script, re.compile(expr, re.I))
+            self.runner.run_script(cursor, script)
+
     def zap_all(self):
         """Clear all data out of the database."""
         def callback(conn, cursor):
@@ -795,6 +915,7 @@
                 self.create(cursor)
             else:
                 self.check_compatibility(cursor, tables)
+                self.update_schema(cursor, tables)
             packages = self.list_packages(cursor)
             if packages.get('relstorage_op') != relstorage_op_version:
                 self.install_plsql(cursor)
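
To show what update_schema() pulls out of the schema script with the new filter_statements() helper, here is a small illustration; the DDL fragment is made up and much shorter than the real script:

    import re
    from relstorage.adapters.schema import filter_statements

    ddl = '''
    CREATE TABLE object_state (zoid BIGINT NOT NULL);
    CREATE TABLE blob_chunk (
        zoid BIGINT NOT NULL,
        chunk BYTEA NOT NULL
    );
    CREATE INDEX blob_chunk_lookup ON blob_chunk (zoid);
    '''
    expr = re.compile(r'(CREATE|ALTER)\s+(GLOBAL TEMPORARY\s+)?(TABLE|INDEX)'
                      r'\s+(temp_)?blob_chunk', re.I)
    print filter_statements(ddl, expr)
    # Prints only the blob_chunk CREATE TABLE and CREATE INDEX statements;
    # object_state is skipped because its first line does not match.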

Copied: relstorage/trunk/relstorage/blobhelper.py (from rev 117099, relstorage/trunk/relstorage/util.py)
===================================================================
--- relstorage/trunk/relstorage/blobhelper.py	                        (rev 0)
+++ relstorage/trunk/relstorage/blobhelper.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -0,0 +1,537 @@
+##############################################################################
+#
+# Copyright (c) 2009 Zope Foundation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Blob management utilities needed by RelStorage.
+
+Most of this code is lifted from ZODB/ZEO.
+"""
+
+from ZODB import POSException
+from ZODB import utils
+from ZODB.utils import p64
+from ZODB.utils import u64
+import BTrees.OOBTree
+import logging
+import os
+import re
+import sys
+import thread
+import threading
+import time
+import zc.lockfile
+
+
+try:
+    import ZODB.blob
+    from ZODB.blob import is_blob_record
+    # ZODB 3.9
+except ImportError:
+    try:
+        from ZODB.blob import Blob
+    except ImportError:
+        # ZODB < 3.8
+        def is_blob_record(record):
+            return False
+    else:
+        # ZODB 3.8
+        import cPickle
+        import cStringIO
+
+        def find_global_Blob(module, class_):
+            if module == 'ZODB.blob' and class_ == 'Blob':
+                return Blob
+
+        def is_blob_record(record):
+            """Check whether a database record is a blob record.
+
+            This is primarily intended to be used when copying data from one
+            storage to another.
+
+            """
+            if record and ('ZODB.blob' in record):
+                unpickler = cPickle.Unpickler(cStringIO.StringIO(record))
+                unpickler.find_global = find_global_Blob
+
+                try:
+                    return unpickler.load() is Blob
+                except (MemoryError, KeyboardInterrupt, SystemExit):
+                    raise
+                except Exception:
+                    pass
+
+            return False
+
+
+class BlobHelper(object):
+    """Blob support for RelStorage.
+
+    There is one BlobHelper per storage instance.
+    """
+
+    # _txn_blobs: {oid->filename}; contains blob data for the
+    # currently uncommitted transaction.
+    _txn_blobs = None
+
+    def __init__(self, options, fshelper=None, cache_checker=None):
+        self.options = options
+        self.blob_dir = options.blob_dir
+        self.shared_blob_dir = options.shared_blob_dir
+
+        if fshelper is None:
+            if self.shared_blob_dir:
+                # Share files over NFS or similar
+                fshelper = ZODB.blob.FilesystemHelper(self.blob_dir)
+            else:
+                # The blob directory is a cache of the blobs
+                if 'zeocache' not in ZODB.blob.LAYOUTS:
+                    ZODB.blob.LAYOUTS['zeocache'] = BlobCacheLayout()
+                fshelper = ZODB.blob.FilesystemHelper(
+                    self.blob_dir, layout_name='zeocache')
+            fshelper.create()
+            fshelper.checkSecure()
+        self.fshelper = fshelper
+
+        if cache_checker is None:
+            cache_checker = BlobCacheChecker(options)
+        self.cache_checker = cache_checker
+
+    def new_instance(self):
+        return BlobHelper(options=self.options,
+            fshelper=self.fshelper, cache_checker=self.cache_checker)
+
+    def clear_temp(self):
+        self._txn_blobs = None
+
+    @property
+    def txn_has_blobs(self):
+        return bool(self._txn_blobs)
+
+    def close(self):
+        self.cache_checker.close()
+
+    def download_blob(self, adapter, cursor, oid, serial, filename):
+        """Download a blob into a file"""
+        bytes = adapter.mover.download_blob(
+            cursor, u64(oid), u64(serial), filename)
+        self.cache_checker.loaded(bytes)
+
+    def upload_blob(self, adapter, cursor, oid, serial, filename):
+        """Upload a blob from a file.
+
+        If serial is None, upload to the temporary table.
+        """
+        if serial is not None:
+            tid_int = u64(serial)
+        else:
+            tid_int = None
+        adapter.mover.upload_blob(cursor, u64(oid), tid_int, filename)
+
+    def loadBlob(self, adapter, cursor, oid, serial):
+        # Load a blob.  If it isn't present and we have a shared blob
+        # directory, then assume that it doesn't exist on the server
+        # and return None.
+
+        blob_filename = self.fshelper.getBlobFilename(oid, serial)
+        if self.shared_blob_dir:
+            if os.path.exists(blob_filename):
+                return blob_filename
+            else:
+                # We're using a server shared cache.  If the file isn't
+                # here, it's not anywhere.
+                raise POSException.POSKeyError("No blob file", oid, serial)
+
+        if os.path.exists(blob_filename):
+            return _accessed(blob_filename)
+
+        # First, we'll create the directory for this oid, if it doesn't exist.
+        self.fshelper.createPathForOID(oid)
+
+        # OK, it's not here and we (or someone) needs to get it.  We
+        # want to avoid getting it multiple times.  We want to avoid
+        # getting it multiple times even across separate client
+        # processes on the same machine. We'll use file locking.
+
+        lock = _lock_blob(blob_filename)
+        try:
+            # We got the lock, so it's our job to download it.  First,
+            # we'll double check that someone didn't download it while we
+            # were getting the lock:
+
+            if os.path.exists(blob_filename):
+                return _accessed(blob_filename)
+
+            self.download_blob(adapter, cursor, oid, serial, blob_filename)
+
+            if os.path.exists(blob_filename):
+                return _accessed(blob_filename)
+
+            raise POSException.POSKeyError("No blob file", oid, serial)
+
+        finally:
+            lock.close()
+
+    def openCommittedBlobFile(self, adapter, cursor, oid, serial, blob=None):
+        blob_filename = self.loadBlob(adapter, cursor, oid, serial)
+        try:
+            if blob is None:
+                return open(blob_filename, 'rb')
+            else:
+                return ZODB.blob.BlobFile(blob_filename, 'r', blob)
+        except IOError:
+            # The file got removed while we were opening.
+            # Fall through and try again with the protection of the lock.
+            pass
+
+        lock = _lock_blob(blob_filename)
+        try:
+            blob_filename = self.fshelper.getBlobFilename(oid, serial)
+            if not os.path.exists(blob_filename):
+                if self.shared_blob_dir:
+                    # We're using a server shared cache.  If the file isn't
+                    # here, it's not anywhere.
+                    raise POSException.POSKeyError("No blob file", oid, serial)
+                self.download_blob(adapter, cursor, oid, serial, blob_filename)
+                if not os.path.exists(blob_filename):
+                    raise POSException.POSKeyError("No blob file", oid, serial)
+
+            _accessed(blob_filename)
+            if blob is None:
+                return open(blob_filename, 'rb')
+            else:
+                return ZODB.blob.BlobFile(blob_filename, 'r', blob)
+        finally:
+            lock.close()
+
+    def temporaryDirectory(self):
+        return self.fshelper.temp_dir
+
+    def storeBlob(self, adapter, cursor, store,
+            oid, serial, data, blobfilename, version, txn):
+        """Storage API: store a blob object."""
+        assert not version
+
+        # Grab the file right away. That way, if we don't have enough
+        # room for a copy, we'll know now rather than in tpc_finish.
+        # Also, this relieves the client of having to manage the file
+        # (or the directory containing it).
+        self.fshelper.getPathForOID(oid, create=True)
+        fd, target = self.fshelper.blob_mkstemp(oid, serial)
+        os.close(fd)
+
+        # It's a bit odd (and impossible on windows) to rename over
+        # an existing file.  We'll use the temporary file name as a base.
+        target += '-'
+        ZODB.blob.rename_or_copy_blob(blobfilename, target)
+        os.remove(target[:-1])
+        self._add_blob_to_transaction(oid, target)
+
+        store(oid, serial, data, '', txn)
+        if not self.shared_blob_dir:
+            self.upload_blob(adapter, cursor, oid, None, target)
+
+    def _add_blob_to_transaction(self, oid, filename):
+        if self._txn_blobs is None:
+            self._txn_blobs = {}
+        else:
+            old_filename = self._txn_blobs.get(oid)
+            if old_filename is not None and old_filename != filename:
+                ZODB.blob.remove_committed(old_filename)
+        self._txn_blobs[oid] = filename
+
+    def restoreBlob(self, adapter, cursor, oid, serial, blobfilename):
+        if self.shared_blob_dir:
+            self.fshelper.getPathForOID(oid, create=True)
+            targetname = self.fshelper.getBlobFilename(oid, serial)
+            ZODB.blob.rename_or_copy_blob(blobfilename, targetname)
+        else:
+            self.upload_blob(adapter, cursor, oid, serial, blobfilename)
+
+    def copy_undone(self, copied, tid):
+        """After an undo operation, copy the matching blobs forward.
+
+        The copied parameter is a list of (integer oid, integer tid).
+        """
+        if not self.shared_blob_dir:
+            # Not necessary
+            return
+
+        for oid_int, old_tid_int in copied:
+            oid = p64(oid_int)
+            old_tid = p64(old_tid_int)
+            orig_fn = self.fshelper.getBlobFilename(oid, old_tid)
+            if not os.path.exists(orig_fn):
+                # not a blob
+                continue
+
+            new_fn = self.fshelper.getBlobFilename(oid, tid)
+            orig = open(orig_fn, 'r')
+            new = open(new_fn, 'wb')
+            ZODB.utils.cp(orig, new)
+            orig.close()
+            new.close()
+
+            self._add_blob_to_transaction(oid, new_fn)
+
+    def after_pack(self, oid_int, tid_int):
+        """Called after an object state has been removed by packing.
+
+        Removes the corresponding blob file.
+        """
+        if not self.shared_blob_dir:
+            # Not necessary
+            return
+
+        oid = p64(oid_int)
+        tid = p64(tid_int)
+        fn = self.fshelper.getBlobFilename(oid, tid)
+        if self.options.keep_history:
+            # remove only the revision just packed
+            if os.path.exists(fn):
+                ZODB.blob.remove_committed(fn)
+                dirname = os.path.dirname(fn)
+                if not _has_files(dirname):
+                    ZODB.blob.remove_committed_dir(dirname)
+        else:
+            # remove all revisions
+            dirname = os.path.dirname(fn)
+            if os.path.exists(dirname):
+                for name in os.listdir(dirname):
+                    ZODB.blob.remove_committed(os.path.join(dirname, name))
+                ZODB.blob.remove_committed_dir(dirname)
+
+    def vote(self, tid):
+        if self._txn_blobs:
+            # We now have a transaction ID, so rename all the blobs
+            # accordingly.
+            for oid, sourcename in self._txn_blobs.items():
+                bytes = os.stat(sourcename).st_size
+                self.cache_checker.loaded(bytes, check=False)
+                targetname = self.fshelper.getBlobFilename(oid, tid)
+                if sourcename != targetname:
+                    lock = _lock_blob(targetname)
+                    try:
+                        ZODB.blob.rename_or_copy_blob(sourcename, targetname)
+                    finally:
+                        lock.close()
+                    self._txn_blobs[oid] = targetname
+            self.cache_checker.check(True)
+
+    def abort(self):
+        if self._txn_blobs:
+            for oid, filename in self._txn_blobs.iteritems():
+                if os.path.exists(filename):
+                    ZODB.blob.remove_committed(filename)
+                    if self.shared_blob_dir:
+                        dirname = os.path.dirname(filename)
+                        if not _has_files(dirname):
+                            ZODB.blob.remove_committed_dir(dirname)
+
+
+class BlobCacheChecker(object):
+    """Control the size of the blob cache.  Shared between BlobHelpers."""
+
+    def __init__(self, options):
+        self.blob_dir = options.blob_dir
+        self.shared_blob_dir = options.shared_blob_dir
+        self._blob_cache_size = options.blob_cache_size
+        self._blob_data_bytes_loaded = 0
+        if self._blob_cache_size is not None:
+            assert options.blob_cache_size_check < 100
+            self._blob_cache_size_check = (
+                self._blob_cache_size * options.blob_cache_size_check / 100)
+            self.check()
+
+    def close(self):
+        if self._check_blob_size_thread is not None:
+            self._check_blob_size_thread.join()
+
+    def loaded(self, bytes, check=True):
+        self._blob_data_bytes_loaded += bytes
+        if check:
+            self.check(True)
+
+    _check_blob_size_thread = None
+    def check(self, check_loaded=False):
+        """If appropriate, run blob cache cleanup in another thread."""
+        if self._blob_cache_size is None:
+            return
+        if self.shared_blob_dir or not self.blob_dir:
+            return
+
+        if (check_loaded and
+                self._blob_data_bytes_loaded < self._blob_cache_size_check):
+            return
+
+        self._blob_data_bytes_loaded = 0
+
+        target = max(self._blob_cache_size - self._blob_cache_size_check, 0)
+
+        check_blob_size_thread = threading.Thread(
+            target=_check_blob_cache_size,
+            args=(self.blob_dir, target),
+            )
+        check_blob_size_thread.setDaemon(True)
+        check_blob_size_thread.start()
+        self._check_blob_size_thread = check_blob_size_thread
+
+
+class BlobCacheLayout(object):
+
+    size = 997
+
+    def oid_to_path(self, oid):
+        return str(utils.u64(oid) % self.size)
+
+    def getBlobFilePath(self, oid, tid):
+        base, rem = divmod(utils.u64(oid), self.size)
+        return os.path.join(
+            str(rem),
+            "%s.%s%s" % (base, tid.encode('hex'), ZODB.blob.BLOB_SUFFIX)
+        )
+
+
+def _accessed(filename):
+    try:
+        os.utime(filename, (time.time(), os.stat(filename).st_mtime))
+    except OSError:
+        pass # We tried. :)
+    return filename
+
+cache_file_name = re.compile(r'\d+$').match
+def _check_blob_cache_size(blob_dir, target):
+
+    logger = logging.getLogger(__name__+'.check_blob_cache')
+
+    layout = open(os.path.join(blob_dir, ZODB.blob.LAYOUT_MARKER)
+                  ).read().strip()
+    if not layout == 'zeocache':
+        logger.critical("Invalid blob directory layout %s", layout)
+        raise ValueError("Invalid blob directory layout", layout)
+
+    attempt_path = os.path.join(blob_dir, 'check_size.attempt')
+
+    try:
+        check_lock = zc.lockfile.LockFile(
+            os.path.join(blob_dir, 'check_size.lock'))
+    except zc.lockfile.LockError:
+        try:
+            time.sleep(1)
+            check_lock = zc.lockfile.LockFile(
+                os.path.join(blob_dir, 'check_size.lock'))
+        except zc.lockfile.LockError:
+            # Someone is already cleaning up, so don't bother
+            logger.debug("%s Another thread is checking the blob cache size.",
+                         thread.get_ident())
+            open(attempt_path, 'w').close() # Mark that we tried
+            return
+
+    logger.debug("%s Checking blob cache size. (target: %s)",
+                 thread.get_ident(), target)
+
+    try:
+        while 1:
+            size = 0
+            blob_suffix = ZODB.blob.BLOB_SUFFIX
+            files_by_atime = BTrees.OOBTree.BTree()
+
+            for dirname in os.listdir(blob_dir):
+                if not cache_file_name(dirname):
+                    continue
+                base = os.path.join(blob_dir, dirname)
+                if not os.path.isdir(base):
+                    continue
+                for file_name in os.listdir(base):
+                    if not file_name.endswith(blob_suffix):
+                        continue
+                    file_path = os.path.join(base, file_name)
+                    if not os.path.isfile(file_path):
+                        continue
+                    stat = os.stat(file_path)
+                    size += stat.st_size
+                    t = stat.st_atime
+                    if t not in files_by_atime:
+                        files_by_atime[t] = []
+                    files_by_atime[t].append(os.path.join(dirname, file_name))
+
+            logger.debug("%s   blob cache size: %s", thread.get_ident(), size)
+
+            if size <= target:
+                if os.path.isfile(attempt_path):
+                    try:
+                        os.remove(attempt_path)
+                    except OSError:
+                        pass # Sigh, windows
+                    continue
+                logger.debug("%s   -->", thread.get_ident())
+                break
+
+            while size > target and files_by_atime:
+                for file_name in files_by_atime.pop(files_by_atime.minKey()):
+                    file_name = os.path.join(blob_dir, file_name)
+                    lockfilename = os.path.join(os.path.dirname(file_name),
+                                                '.lock')
+                    try:
+                        lock = zc.lockfile.LockFile(lockfilename)
+                    except zc.lockfile.LockError:
+                        logger.debug("%s Skipping locked %s",
+                                     thread.get_ident(),
+                                     os.path.basename(file_name))
+                        continue  # In use, skip
+
+                    try:
+                        fsize = os.stat(file_name).st_size
+                        try:
+                            ZODB.blob.remove_committed(file_name)
+                        except OSError, v:
+                            pass # probably open on windows
+                        else:
+                            size -= fsize
+                    finally:
+                        lock.close()
+
+                    if size <= target:
+                        break
+
+            logger.debug("%s   reduced blob cache size: %s",
+                         thread.get_ident(), size)
+
+    finally:
+        check_lock.close()
+
+def check_blob_size_script(args=None):
+    if args is None:
+        args = sys.argv[1:]
+    blob_dir, target = args
+    _check_blob_cache_size(blob_dir, int(target))
+
+def _lock_blob(path):
+    lockfilename = os.path.join(os.path.dirname(path), '.lock')
+    n = 0
+    while 1:
+        try:
+            return zc.lockfile.LockFile(lockfilename)
+        except zc.lockfile.LockError:
+            time.sleep(0.01)
+            n += 1
+            if n > 60000:
+                raise
+        else:
+            break
+
+def _has_files(dirname):
+    """Return True if a directory has any visible files."""
+    names = os.listdir(dirname)
+    if not names:
+        return False
+    for name in names:
+        if not name.startswith('.'):
+            return True
+    return False
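
For reference, a small sketch of how the zeocache layout above maps an oid/tid pair to a cache file name, using the BlobCacheLayout class added in this file; the oid and tid values are arbitrary:

    from ZODB.utils import p64
    from relstorage.blobhelper import BlobCacheLayout

    layout = BlobCacheLayout()
    oid = p64(12345)
    tid = p64(67890)
    print layout.oid_to_path(oid)           # '381'  (12345 % 997)
    print layout.getBlobFilePath(oid, tid)  # '381/12.0000000000010932.blob' on POSIX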

Modified: relstorage/trunk/relstorage/component.xml
===================================================================
--- relstorage/trunk/relstorage/component.xml	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/component.xml	2010-10-19 21:58:48 UTC (rev 117780)
@@ -12,205 +12,70 @@
     <section type="relstorage.adapter" name="*" attribute="adapter"/>
     <key name="name" datatype="string" required="no"/>
     <key name="read-only" datatype="boolean" default="false">
-      <description>
-        If true, only reads may be executed against the storage.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="blob-dir" required="no">
-      <description>
-        If supplied, the storage will provide blob support; this
-        parameter specifies the name of the directory to hold blob data.
-        The directory will be created if it doeesn't exist. If no value
-        (or an empty value) is provided, then no blob support will be
-        provided.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
+    <key name="shared-blob-dir" datatype="boolean" default="true">
+      <description>See the RelStorage README.txt file.</description>
+    </key>
+    <key name="blob-cache-size" datatype="integer" required="no">
+      <description>See the RelStorage README.txt file.</description>
+    </key>
+    <key name="blob-cache-size-check" datatype="integer" required="no">
+      <description>See the RelStorage README.txt file.</description>
+    </key>
+    <key name="blob-chunk-size" datatype="integer" required="no">
+      <description>See the RelStorage README.txt file.</description>
+    </key>
     <key name="keep-history" datatype="boolean" default="true">
-      <description>
-        If this parameter is set to true (the default), the adapter
-        will create and use a history-preserving database schema
-        (like FileStorage). A history-preserving schema supports
-        ZODB-level undo, but also grows more quickly and requires extensive
-        packing on a regular basis.
-
-        If this parameter is set to false, the adapter will create and
-        use a history-free database schema. Undo will not be supported,
-        but the database will not grow as quickly. The database will
-        still require regular garbage collection (which is accessible
-        through the database pack mechanism.)
-
-        This parameter must not change once the database schema has
-        been installed, because the schemas for history-preserving and
-        history-free storage are different. If you want to convert
-        between a history-preserving and a history-free database, use
-        the ``zodbconvert`` utility to copy to a new database.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="replica-conf" datatype="string" required="no">
-      <description>
-        If this parameter is provided, it specifies a text file that
-        contains a list of database replicas this adapter can choose
-        from. For MySQL and PostgreSQL, put in the replica file a list
-        of ``host:port`` or ``host`` values, one per line. For Oracle,
-        put in a list of DSN values. Blank lines and lines starting
-        with ``#`` are ignored.
-
-        The adapter prefers the first replica specified in the file. If
-        the first is not available, the adapter automatically tries the
-        rest of the replicas, in order. If the file changes, the
-        adapter will drop existing SQL database connections and make
-        new connections when ZODB starts a new transaction.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="replica-timeout" datatype="float" default="600.0">
-      <description>
-        If this parameter has a nonzero value, when the adapter selects
-        a replica other than the primary replica, the adapter will
-        try to revert to the primary replica after the specified
-        timeout (in seconds).  The default is 600, meaning 10 minutes.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="poll-interval" datatype="float" required="no">
-      <description>
-        Defer polling the database for the specified maximum time interval,
-        in seconds.  Set to 0 (the default) to always poll.  Fractional
-        seconds are allowed.  Use this to lighten the database load on
-        servers with high read volume and low write volume.
-
-        The poll-interval option works best in conjunction with
-        the cache-servers option.  If both are enabled, RelStorage will
-        poll a single cache key for changes on every request.
-        The database will not be polled unless the cache indicates
-        there have been changes, or the timeout specified by poll-interval
-        has expired.  This configuration keeps clients fully up to date,
-        while removing much of the polling burden from the database.
-        A good cluster configuration is to use memcache servers
-        and a high poll-interval (say, 60 seconds).
-
-        This option can be used without the cache-servers option,
-        but a large poll-interval without cache-servers increases the
-        probability of basing transactions on stale data, which does not
-        affect database consistency, but does increase the probability
-        of conflict errors, leading to low performance.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="pack-gc" datatype="boolean" default="true">
-      <description>
-        If pack-gc is false, pack operations do not perform
-        garbage collection.  Garbage collection is enabled by default.
-
-        If garbage collection is disabled, pack operations keep at least one
-        revision of every object.  With garbage collection disabled, the
-        pack code does not need to follow object references, making
-        packing conceivably much faster.  However, some of that benefit
-        may be lost due to an ever increasing number of unused objects.
-
-        Disabling garbage collection is also a hack that ensures
-        inter-database references never break.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="pack-dry-run" datatype="boolean" default="false">
-      <description>
-        If pack-dry-run is true, pack operations perform a full analysis
-        of what to pack, but no data is actually removed.  After a dry run,
-        the pack_object, pack_state, and pack_state_tid tables are filled
-        with the list of object states and objects that would have been
-        removed.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="pack-batch-timeout" datatype="float" required="no">
-      <description>
-        Packing occurs in batches of transactions; this specifies the
-        timeout in seconds for each batch.  Note that some database
-        configurations have unpredictable I/O performance
-        and might stall much longer than the timeout.
-        The default timeout is 5.0 seconds.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="pack-duty-cycle" datatype="float" required="no">
-      <description>
-        After each batch, the pack code pauses for a time to
-        allow concurrent transactions to commit.  The pack-duty-cycle
-        specifies what fraction of time should be spent on packing.
-        For example, if the duty cycle is 0.75, then 75% of the time
-        will be spent packing: a 6 second pack batch
-        will be followed by a 2 second delay.  The duty cycle should
-        be greater than 0.0 and less than or equal to 1.0.  Specify
-        1.0 for no delay between batches.
-
-        The default is 0.5.  Raise it to finish packing faster; lower it
-        to reduce the effect of packing on transaction commit performance.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="pack-max-delay" datatype="float" required="no">
-      <description>
-        This specifies a maximum delay between pack batches.  Sometimes
-        the database takes an extra long time to finish a pack batch; at
-        those times it is useful to cap the delay imposed by the
-        pack-duty-cycle.  The default is 20 seconds.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="cache-servers" datatype="string" required="no">
-      <description>
-        Specifies a list of memcached servers. Using memcached with
-        RelStorage improves the speed of frequent object accesses while
-        slightly reducing the speed of other operations.
-
-        Provide a list of host:port pairs, separated by whitespace.
-        "127.0.0.1:11211" is a common setting.  Some memcached modules,
-        such as pylibmc, allow you to specify a path to a Unix socket
-        instead of a host:port pair.
-
-        The default is to disable memcached integration.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="cache-module-name" datatype="string" required="no">
-      <description>
-        Specifies which Python memcache module to use.  The default is
-        "memcache", a pure Python module.  An alternative module is
-        "relstorage.pylibmc_wrapper".  This setting has no effect
-        unless cache-servers is set.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="cache-prefix" datatype="string" required="no">
-      <description>
-        The prefix for all keys in the cache.  All clients using a
-        database should use the same cache-prefix.  Use this if you use
-        a single cache for multiple databases.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="cache-local-mb" datatype="integer" required="no">
-      <description>
-        RelStorage caches pickled objects in memory, similar to a ZEO
-        cache. This cache is shared between threads. This parameter
-        configures the approximate maximum amount of memory the cache
-        should consume, in megabytes.  It defaults to 10.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="cache-delta-size-limit" datatype="integer" required="no">
-      <description>
-        This is an advanced option. RelStorage uses a system of
-        checkpoints to improve the cache hit rate. This parameter
-        configures how many objects should be stored before creating a
-        new checkpoint in the cache. The default is 10000.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="commit-lock-timeout" datatype="integer" required="no">
-      <description>
-        During commit, RelStorage acquires a database-wide lock. This
-        parameter specifies how long to wait for the lock before
-        failing the attempt to commit. The default is 30 seconds.
-
-        The MySQL and Oracle adapters support this parameter. The
-        PostgreSQL adapter currently does not.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
     <key name="commit-lock-id" datatype="integer" required="no">
-      <description>
-        During commit, RelStorage acquires a database-wide lock. This
-        parameter specifies the lock ID. This parameter currently
-        applies only to the Oracle adapter.
-      </description>
+      <description>See the RelStorage README.txt file.</description>
     </key>
   </sectiontype>
 

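Since the option descriptions now live only in README.txt, a minimal sketch of a blobs-in-database configuration may help for reference. It assumes the %import relstorage / <relstorage> ZConfig layout described in README.txt; the MySQL connection parameters, directory paths, and sizes below are placeholders, not part of this checkin.

    # Sketch: open a database that keeps blob data in MySQL rather than on
    # a shared filesystem; the blob directory serves only as a local cache.
    import ZODB.config

    config = """
    %import relstorage
    <zodb main>
      <relstorage>
        blob-dir ./blob-cache
        shared-blob-dir false
        blob-cache-size 104857600
        blob-chunk-size 1048576
        <mysql>
          db zodb
          user relstoragetest
          passwd relstoragetest
        </mysql>
      </relstorage>
    </zodb>
    """

    db = ZODB.config.databaseFromString(config)
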
Modified: relstorage/trunk/relstorage/options.py
===================================================================
--- relstorage/trunk/relstorage/options.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/options.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -28,6 +28,10 @@
         self.name = None
         self.read_only = False
         self.blob_dir = None
+        self.shared_blob_dir = True
+        self.blob_cache_size = None
+        self.blob_cache_size_check = 10
+        self.blob_chunk_size = 1 << 20
         self.keep_history = True
         self.replica_conf = None
         self.replica_timeout = 600.0
@@ -38,7 +42,7 @@
         self.pack_duty_cycle = 0.5
         self.pack_max_delay = 20.0
         self.cache_servers = ()  # ['127.0.0.1:11211']
-        self.cache_module_name = 'memcache'
+        self.cache_module_name = 'relstorage.pylibmc_wrapper'
         self.cache_prefix = ''
         self.cache_local_mb = 10
         self.cache_delta_size_limit = 10000

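The new defaults above map directly onto the README options; blob_chunk_size of 1 << 20 is the documented 1048576 bytes. A small sketch of constructing Options directly, assuming it keeps accepting these names as keyword arguments the way the updated MySQL tests do; the path and sizes are illustrative only.

    # Sketch: Options mirroring a blobs-in-database setup.  Values are
    # illustrative; blob_cache_size_check stays at its default of 10 percent.
    from relstorage.options import Options

    options = Options(
        blob_dir='/var/zodb/blob-cache',      # enables blob support
        shared_blob_dir=False,                # keep blob data in the database
        blob_cache_size=500 * 1024 * 1024,    # cap the local cache at 500 MB
        blob_chunk_size=1 << 20,              # 1 MB chunks (the default)
    )
    print(options.blob_cache_size_check)      # -> 10 (percent of the cache size)
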
Modified: relstorage/trunk/relstorage/storage.py
===================================================================
--- relstorage/trunk/relstorage/storage.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/storage.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -17,9 +17,10 @@
 """
 
 from persistent.TimeStamp import TimeStamp
+from relstorage.blobhelper import BlobHelper
+from relstorage.blobhelper import is_blob_record
 from relstorage.cache import StorageCache
 from relstorage.options import Options
-from relstorage.util import is_blob_record
 from ZODB.BaseStorage import DataRecord
 from ZODB.BaseStorage import TransactionRecord
 from ZODB import ConflictResolution
@@ -126,14 +127,10 @@
     # _poll_at is the time to force a poll
     _poll_at = 0
 
-    # If the blob directory is set, fshelper is a filesystem blob
-    # helper.  Otherwise, fshelper is None.
-    fshelper = None
+    # If the blob directory is set, blobhelper is a BlobHelper.
+    # Otherwise, blobhelper is None.
+    blobhelper = None
 
-    # _txn_blobs: {oid->filename}; contains blob data for the
-    # currently uncommitted transaction.
-    _txn_blobs = None
-
     # _txn_check_serials: {oid, serial}; confirms that certain objects
     # have not changed at commit.
     _txn_check_serials = None
@@ -147,7 +144,7 @@
     _batcher_row_limit = 100
 
     def __init__(self, adapter, name=None, create=True,
-            options=None, cache=None, **kwoptions):
+            options=None, cache=None, blobhelper=None, **kwoptions):
         self._adapter = adapter
 
         if options is None:
@@ -190,13 +187,33 @@
         else:
             self._cache = StorageCache(adapter, options)
 
-        if options.blob_dir:
-            from ZODB.blob import FilesystemHelper
-            self.fshelper = FilesystemHelper(options.blob_dir)
-            if create:
-                self.fshelper.create()
-                self.fshelper.checkSecure()
+        if blobhelper is not None:
+            self.blobhelper = blobhelper
+        elif options.blob_dir:
+            self.blobhelper = BlobHelper(options=options)
 
+    def new_instance(self):
+        """Creates and returns another storage instance.
+
+        See ZODB.interfaces.IMVCCStorage.
+        """
+        adapter = self._adapter.new_instance()
+        cache = self._cache.new_instance()
+        if self.blobhelper is not None:
+            blobhelper = self.blobhelper.new_instance()
+        else:
+            blobhelper = None
+        other = RelStorage(adapter=adapter, name=self.__name__,
+            create=False, options=self._options, cache=cache,
+            blobhelper=blobhelper)
+        self._instances.append(weakref.ref(other, self._instances.remove))
+        return other
+
+    @property
+    def fshelper(self):
+        """Used in tests"""
+        return self.blobhelper.fshelper
+
     def _open_load_connection(self):
         """Open the load connection to the database.  Return nothing."""
         conn, cursor = self._adapter.connmanager.open_for_load()
@@ -325,6 +342,8 @@
             self._closed = True
             self._drop_load_connection()
             self._drop_store_connection()
+            if self.blobhelper is not None:
+                self.blobhelper.close()
             for wref in self._instances:
                 instance = wref()
                 if instance is not None:
@@ -332,18 +351,6 @@
         finally:
             self._lock_release()
 
-    def new_instance(self):
-        """Creates and returns another storage instance.
-
-        See ZODB.interfaces.IMVCCStorage.
-        """
-        adapter = self._adapter.new_instance()
-        cache = self._cache.new_instance()
-        other = RelStorage(adapter=adapter, name=self.__name__,
-            create=False, options=self._options, cache=cache)
-        self._instances.append(weakref.ref(other))
-        return other
-
     def __len__(self):
         return self._adapter.stats.get_object_count()
 
@@ -714,9 +721,11 @@
         self._prepared_txn = None
         self._max_stored_oid = 0
         self._batcher = None
-        self._txn_blobs = None
         self._txn_check_serials = None
         self._cache.clear_temp()
+        blobhelper = self.blobhelper
+        if blobhelper is not None:
+            blobhelper.clear_temp()
 
 
     def _finish_store(self):
@@ -759,7 +768,11 @@
         # Move the new states into the permanent table
         tid_int = u64(self._tid)
         serials = []
-        oid_ints = adapter.mover.move_from_temp(cursor, tid_int)
+        if self.blobhelper is not None:
+            txn_has_blobs = self.blobhelper.txn_has_blobs
+        else:
+            txn_has_blobs = False
+        oid_ints = adapter.mover.move_from_temp(cursor, tid_int, txn_has_blobs)
         for oid_int in oid_ints:
             oid = p64(oid_int)
             if oid in resolved:
@@ -832,14 +845,8 @@
         self._prepared_txn = self._adapter.txncontrol.commit_phase1(
             conn, cursor, tid_int)
 
-        if self._txn_blobs:
-            # We now have a transaction ID, so rename all the blobs
-            # accordingly.
-            for oid, sourcename in self._txn_blobs.items():
-                targetname = self.fshelper.getBlobFilename(oid, self._tid)
-                if sourcename != targetname:
-                    ZODB.blob.rename_or_copy_blob(sourcename, targetname)
-                    self._txn_blobs[oid] = targetname
+        if self.blobhelper is not None:
+            self.blobhelper.vote(self._tid)
 
         return serials
 
@@ -883,23 +890,7 @@
         # including cache updates.
         self._ltid = self._tid
 
-        #if self._txn_blobs and not self._adapter.keep_history:
-            ## For each blob just committed, get the name of
-            ## one earlier revision (if any) and write the
-            ## name of the file to a log.  At pack time,
-            ## all the files in the log will be deleted and
-            ## the log will be cleared.
-            #for oid, filename in self._txn_blobs.iteritems():
-                #dirname, current_name = os.path.split(filename)
-                #names = os.listdir(dirname)
-                #names.sort()
-                #if current_name in names:
-                    #i = names.index(current_name)
-                    #if i > 0:
-                    #    to_delete = os.path.join(dirname, names[i-1])
-                    #    log.write('%s\n') % to_delete
 
-
     def tpc_abort(self, transaction):
         self._lock_acquire()
         try:
@@ -922,13 +913,8 @@
             self._adapter.txncontrol.abort(
                 self._store_conn, self._store_cursor, self._prepared_txn)
             self._adapter.locker.release_commit_lock(self._store_cursor)
-        if self._txn_blobs:
-            for oid, filename in self._txn_blobs.iteritems():
-                if os.path.exists(filename):
-                    ZODB.blob.remove_committed(filename)
-                    dirname = os.path.dirname(filename)
-                    if not os.listdir(dirname):
-                        ZODB.blob.remove_committed_dir(dirname)
+        if self.blobhelper is not None:
+            self.blobhelper.abort()
 
     def lastTransaction(self):
         self._lock_acquire()
@@ -1077,8 +1063,8 @@
                 # the new current objects.
                 adapter.mover.update_current(cursor, self_tid_int)
 
-                if self.fshelper is not None:
-                    self._copy_undone_blobs(copied)
+                if self.blobhelper is not None:
+                    self.blobhelper.copy_undone(copied, self._tid)
 
                 return self._tid, oids
             finally:
@@ -1086,28 +1072,6 @@
         finally:
             self._lock_release()
 
-    def _copy_undone_blobs(self, copied):
-        """After an undo operation, copy the matching blobs forward.
-
-        The copied parameter is a list of (integer oid, integer tid).
-        """
-        for oid_int, old_tid_int in copied:
-            oid = p64(oid_int)
-            old_tid = p64(old_tid_int)
-            orig_fn = self.fshelper.getBlobFilename(oid, old_tid)
-            if not os.path.exists(orig_fn):
-                # not a blob
-                continue
-
-            new_fn = self.fshelper.getBlobFilename(oid, self._tid)
-            orig = open(orig_fn, 'r')
-            new = open(new_fn, 'wb')
-            ZODB.utils.cp(orig, new)
-            orig.close()
-            new.close()
-
-            self._add_blob_to_transaction(oid, new_fn)
-
     def pack(self, t, referencesf, sleep=None):
         if self._is_read_only:
             raise POSException.ReadOnlyError()
@@ -1157,8 +1121,8 @@
                     log.info("pack: dry run complete")
                 else:
                     # Now pack.
-                    if self.fshelper is not None:
-                        packed_func = self._after_pack
+                    if self.blobhelper is not None:
+                        packed_func = self.blobhelper.after_pack
                     else:
                         packed_func = None
                     adapter.packundo.pack(tid_int, self._options, sleep=sleep,
@@ -1172,34 +1136,11 @@
 
         self._pack_finished()
 
-    def _after_pack(self, oid_int, tid_int):
-        """Called after an object state has been removed by packing.
-
-        Removes the corresponding blob file.
-        """
-        oid = p64(oid_int)
-        tid = p64(tid_int)
-        fn = self.fshelper.getBlobFilename(oid, tid)
-        if self._adapter.keep_history:
-            # remove only the revision just packed
-            if os.path.exists(fn):
-                ZODB.blob.remove_committed(fn)
-                dirname = os.path.dirname(fn)
-                if not os.listdir(dirname):
-                    ZODB.blob.remove_committed_dir(dirname)
-        else:
-            # remove all revisions
-            dirname = os.path.dirname(fn)
-            if os.path.exists(dirname):
-                for name in os.listdir(dirname):
-                    ZODB.blob.remove_committed(os.path.join(dirname, name))
-                ZODB.blob.remove_committed_dir(dirname)
-
     def _pack_finished(self):
-        if self.fshelper is None or self._adapter.keep_history:
+        if self.blobhelper is None or self._adapter.keep_history:
             return
 
-        # Remove all old revisions of blobs.
+        # TODO: Remove all old revisions of blobs in history-free mode.
 
     def iterator(self, start=None, stop=None):
         return TransactionIterator(self._adapter, start, stop)
@@ -1268,7 +1209,7 @@
         return changes, new_polled_tid
 
     def poll_invalidations(self):
-        """Looks for OIDs of objects that changed since _prev_polled_tid
+        """Look for OIDs of objects that changed since _prev_polled_tid.
 
         Returns {oid: 1}, or None if all objects need to be invalidated
         because prev_polled_tid is not in the database (presumably it
@@ -1306,14 +1247,15 @@
 
         Raises POSKeyError if the blobfile cannot be found.
         """
-        if self.fshelper is None:
+        if self.blobhelper is None:
             raise POSException.Unsupported("No blob directory is configured.")
 
-        blob_filename = self.fshelper.getBlobFilename(oid, serial)
-        if os.path.exists(blob_filename):
-            return blob_filename
-        else:
-            raise POSKeyError("No blob file", oid, serial)
+        self._lock_acquire()
+        try:
+            cursor = self._load_cursor
+            return self.blobhelper.loadBlob(self._adapter, cursor, oid, serial)
+        finally:
+            self._lock_release()
 
     def openCommittedBlobFile(self, oid, serial, blob=None):
         """Return a file for committed data for the given object id and serial
@@ -1326,20 +1268,22 @@
         make sure that data are available at least long enough for the
         file to be opened.
         """
-        blob_filename = self.loadBlob(oid, serial)
-        if blob is None:
-            return open(blob_filename, 'rb')
-        else:
-            return ZODB.blob.BlobFile(blob_filename, 'r', blob)
+        self._lock_acquire()
+        try:
+            cursor = self._load_cursor
+            return self.blobhelper.openCommittedBlobFile(
+                self._adapter, cursor, oid, serial, blob=blob)
+        finally:
+            self._lock_release()
 
     def temporaryDirectory(self):
         """Return a directory that should be used for uncommitted blob data.
 
         If Blobs use this, then commits can be performed with a simple rename.
         """
-        return self.fshelper.temp_dir
+        return self.blobhelper.temporaryDirectory()
 
-    def storeBlob(self, oid, oldserial, data, blobfilename, version, txn):
+    def storeBlob(self, oid, serial, data, blobfilename, version, txn):
         """Stores data that has a BLOB attached.
 
         The blobfilename argument names a file containing blob data.
@@ -1350,8 +1294,14 @@
         The new serial is returned.
         """
         assert not version
-        self.store(oid, oldserial, data, '', txn)
-        self._store_blob_data(oid, oldserial, blobfilename)
+        self._lock_acquire()
+        try:
+            self._batcher.flush()
+            cursor = self._store_cursor
+            self.blobhelper.storeBlob(self._adapter, cursor, self.store,
+                oid, serial, data, blobfilename, version, txn)
+        finally:
+            self._lock_release()
         return None
 
     def restoreBlob(self, oid, serial, data, blobfilename, prev_txn, txn):
@@ -1362,44 +1312,20 @@
         self.restore(oid, serial, data, '', prev_txn, txn)
         self._lock_acquire()
         try:
-            self.fshelper.getPathForOID(oid, create=True)
-            targetname = self.fshelper.getBlobFilename(oid, serial)
-            ZODB.blob.rename_or_copy_blob(blobfilename, targetname)
+            self._batcher.flush()
+            cursor = self._store_cursor
+            self.blobhelper.restoreBlob(
+                self._adapter, cursor, oid, serial, blobfilename)
         finally:
             self._lock_release()
 
-    def _store_blob_data(self, oid, oldserial, filename):
-        self.fshelper.getPathForOID(oid, create=True)
-        fd, target = self.fshelper.blob_mkstemp(oid, oldserial)
-        os.close(fd)
-        if sys.platform == 'win32':
-            # On windows, we can't rename to an existing file.  We'll
-            # use a slightly different file name. We keep the old one
-            # until we're done to avoid conflicts. Then remove the old name.
-            target += 'w'
-            ZODB.blob.rename_or_copy_blob(filename, target)
-            os.remove(target[:-1])
-        else:
-            ZODB.blob.rename_or_copy_blob(filename, target)
-
-        self._add_blob_to_transaction(oid, target)
-
-    def _add_blob_to_transaction(self, oid, filename):
-        if self._txn_blobs is None:
-            self._txn_blobs = {}
-        else:
-            old_filename = self._txn_blobs.get(oid)
-            if old_filename is not None and old_filename != filename:
-                ZODB.blob.remove_committed(old_filename)
-        self._txn_blobs[oid] = filename
-
     def copyTransactionsFrom(self, other):
         # adapted from ZODB.blob.BlobStorageMixin
         for trans in other.iterator():
             self.tpc_begin(trans, trans.tid, trans.status)
             for record in trans:
                 blobfilename = None
-                if self.fshelper is not None:
+                if self.blobhelper is not None:
                     if is_blob_record(record.data):
                         try:
                             blobfilename = other.loadBlob(
@@ -1408,7 +1334,8 @@
                             pass
                 if blobfilename is not None:
                     fd, name = tempfile.mkstemp(
-                        suffix='.tmp', dir=self.fshelper.temp_dir)
+                        suffix='.tmp',
+                        dir=self.blobhelper.temporaryDirectory())
                     os.close(fd)
                     ZODB.utils.cp(open(blobfilename, 'rb'), open(name, 'wb'))
                     self.restoreBlob(record.oid, record.tid, record.data,

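With blob handling consolidated into BlobHelper, the storage API is unchanged from the caller's side. A rough usage sketch following the pattern in the updated MySQL tests; the credentials and blob directory are placeholders, and the comments about internal wiring reflect the constructor change above rather than guaranteed behaviour.

    # Sketch: a blob-capable RelStorage whose blob data lives in the database.
    from relstorage.options import Options
    from relstorage.storage import RelStorage
    from relstorage.adapters.mysql import MySQLAdapter

    options = Options(blob_dir='/var/zodb/blob-cache', shared_blob_dir=False)
    adapter = MySQLAdapter(options=options, db='zodb',
                           user='relstoragetest', passwd='relstoragetest')
    storage = RelStorage(adapter, options=options)

    # Because blob_dir is set, __init__ builds a BlobHelper, and storeBlob,
    # loadBlob, vote, and abort delegate their blob work to it.
    assert storage.blobhelper is not None

    # new_instance() now clones the helper alongside the adapter and cache.
    instance = storage.new_instance()
    instance.close()
    storage.close()
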
Modified: relstorage/trunk/relstorage/tests/RecoveryStorage.py
===================================================================
--- relstorage/trunk/relstorage/tests/RecoveryStorage.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/tests/RecoveryStorage.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -16,7 +16,7 @@
 # This is copied from ZODB.tests.RecoveryStorage and expanded to fit
 # history-free storages.
 
-from relstorage.util import is_blob_record
+from relstorage.blobhelper import is_blob_record
 from transaction import Transaction
 from ZODB import DB
 from ZODB.serialize import referencesf

Modified: relstorage/trunk/relstorage/tests/testmysql.py
===================================================================
--- relstorage/trunk/relstorage/tests/testmysql.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/tests/testmysql.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -156,38 +156,47 @@
         pass
     else:
         from relstorage.tests.blob.testblob import storage_reusable_suite
-        for keep_history in (False, True):
-            def create_storage(name, blob_dir, keep_history=keep_history):
-                from relstorage.storage import RelStorage
-                from relstorage.adapters.mysql import MySQLAdapter
-                db = db_names[name]
-                if not keep_history:
-                    db += '_hf'
-                adapter = MySQLAdapter(
-                    options=Options(keep_history=keep_history),
-                    db=db,
-                    user='relstoragetest',
-                    passwd='relstoragetest',
+        for shared_blob_dir in (False, True):
+            for keep_history in (False, True):
+                def create_storage(name, blob_dir,
+                        shared_blob_dir=shared_blob_dir,
+                        keep_history=keep_history):
+                    from relstorage.storage import RelStorage
+                    from relstorage.adapters.mysql import MySQLAdapter
+                    db = db_names[name]
+                    if not keep_history:
+                        db += '_hf'
+                    options = Options(
+                        keep_history=keep_history,
+                        shared_blob_dir=shared_blob_dir,
+                        blob_dir=os.path.abspath(blob_dir),
                     )
-                storage = RelStorage(adapter, name=name, create=True,
-                    blob_dir=os.path.abspath(blob_dir))
-                storage.zap_all()
-                return storage
+                    adapter = MySQLAdapter(
+                        options=options,
+                        db=db,
+                        user='relstoragetest',
+                        passwd='relstoragetest',
+                    )
+                    storage = RelStorage(adapter, name=name, options=options)
+                    storage.zap_all()
+                    return storage
 
-            if keep_history:
-                prefix = 'HPMySQL'
-                pack_test_name = 'blob_packing.txt'
-            else:
-                prefix = 'HFMySQL'
-                pack_test_name = 'blob_packing_history_free.txt'
+                prefix = 'MySQL%s%s' % (
+                    (shared_blob_dir and 'Shared' or 'Unshared'),
+                    (keep_history and 'WithHistory' or 'NoHistory'),
+                )
+                if keep_history:
+                    pack_test_name = 'blob_packing.txt'
+                else:
+                    pack_test_name = 'blob_packing_history_free.txt'
 
-            suite.addTest(storage_reusable_suite(
-                prefix, create_storage,
-                test_blob_storage_recovery=True,
-                test_packing=True,
-                test_undo=keep_history,
-                pack_test_name=pack_test_name,
-                ))
+                suite.addTest(storage_reusable_suite(
+                    prefix, create_storage,
+                    test_blob_storage_recovery=True,
+                    test_packing=True,
+                    test_undo=keep_history,
+                    pack_test_name=pack_test_name,
+                    ))
 
     return suite
 

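The reworked test loop binds shared_blob_dir and keep_history as default arguments of create_storage so each generated suite keeps the values from its own iteration. A standalone illustration of that idiom, since the late-binding pitfall is easy to miss:

    # Default arguments are evaluated when the def runs, so each function
    # captures the loop value current at that moment; a bare closure would
    # see only the final value once the loop has finished.
    late, early = [], []
    for flag in (False, True):
        late.append(lambda: flag)             # late binding
        early.append(lambda flag=flag: flag)  # early binding via default arg
    print([f() for f in late])    # [True, True]
    print([f() for f in early])   # [False, True]
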
Deleted: relstorage/trunk/relstorage/util.py
===================================================================
--- relstorage/trunk/relstorage/util.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/relstorage/util.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -1,53 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2009 Zope Foundation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Utilities needed by RelStorage"""
-
-try:
-    from ZODB.blob import is_blob_record
-    # ZODB 3.9
-except ImportError:
-    try:
-        from ZODB.blob import Blob
-    except ImportError:
-        # ZODB < 3.8
-        def is_blob_record(record):
-            False
-    else:
-        # ZODB 3.8
-        import cPickle
-        import cStringIO
-
-        def find_global_Blob(module, class_):
-            if module == 'ZODB.blob' and class_ == 'Blob':
-                return Blob
-
-        def is_blob_record(record):
-            """Check whether a database record is a blob record.
-
-            This is primarily intended to be used when copying data from one
-            storage to another.
-
-            """
-            if record and ('ZODB.blob' in record):
-                unpickler = cPickle.Unpickler(cStringIO.StringIO(record))
-                unpickler.find_global = find_global_Blob
-
-                try:
-                    return unpickler.load() is Blob
-                except (MemoryError, KeyboardInterrupt, SystemExit):
-                    raise
-                except Exception:
-                    pass
-
-            return False

Modified: relstorage/trunk/setup.py
===================================================================
--- relstorage/trunk/setup.py	2010-10-19 16:23:06 UTC (rev 117779)
+++ relstorage/trunk/setup.py	2010-10-19 21:58:48 UTC (rev 117780)
@@ -21,7 +21,6 @@
 # Development Status :: 3 - Alpha
 
 classifiers = """\
-Development Status :: 5 - Production/Stable
 Intended Audience :: Developers
 License :: OSI Approved :: Zope Public License
 Programming Language :: Python
@@ -32,31 +31,7 @@
 """
 
 import os
-try:
-    from setuptools import setup
-except ImportError:
-    from distutils.core import setup
-    setup_args = dict(
-        scripts=['relstorage/zodbconvert.py'],
-    )
-else:
-    setup_args = dict(
-        zip_safe=False,  # otherwise ZConfig can't see component.xml
-        install_requires=[
-            'ZODB3>=3.7.0',
-            'zope.interface',
-            ],
-        extras_require={
-            'mysql':      ['MySQL-python>=1.2.2'],
-            'postgresql': ['psycopg2>=2.0'],
-            'oracle':     ['cx_Oracle>=4.3.1'],
-            },
-        entry_points = {'console_scripts': [
-            'zodbconvert = relstorage.zodbconvert:main',
-            'zodbpack = relstorage.zodbpack:main',
-            ]},
-        test_suite='relstorage.tests.alltests.make_suite',
-    )
+from setuptools import setup
 
 doclines = __doc__.split("\n")
 
@@ -78,7 +53,7 @@
         'relstorage.adapters.tests',
         'relstorage.tests',
         'relstorage.tests.blob',
-        ],
+    ],
     package_data={
         'relstorage': ['component.xml'],
     },
@@ -91,5 +66,20 @@
         "Change History\n" +
         "==============\n\n" +
         read_file("CHANGES.txt")),
-    **setup_args
-    )
+    zip_safe=False,  # otherwise ZConfig can't see component.xml
+    install_requires=[
+        'ZODB3>=3.7.0',
+        'zope.interface',
+        'zc.lockfile',
+    ],
+    extras_require={
+        'mysql':      ['MySQL-python>=1.2.2'],
+        'postgresql': ['psycopg2>=2.0'],
+        'oracle':     ['cx_Oracle>=4.3.1'],
+    },
+    entry_points = {'console_scripts': [
+        'zodbconvert = relstorage.zodbconvert:main',
+        'zodbpack = relstorage.zodbpack:main',
+    ]},
+    test_suite='relstorage.tests.alltests.make_suite',
+)


