[Zodb-checkins] CVS: ZODB3/ZODB/FileStorage - FileStorage.py:1.1.2.19 format.py:1.1.2.5 fspack.py:1.1.2.2

Jeremy Hylton cvs-admin at zope.org
Tue Dec 2 02:11:06 EST 2003


Update of /cvs-repository/ZODB3/ZODB/FileStorage
In directory cvs.zope.org:/tmp/cvs-serv24432/ZODB/FileStorage

Modified Files:
      Tag: ZODB3-mvcc-2-branch
	FileStorage.py format.py fspack.py 
Log Message:
First cut at removal of serial numbers.
Rename loadNonCurrent() to loadBefore() (Jim's suggestion).
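
(Not part of the checkin: a minimal sketch of how the renamed method is
meant to be called, based on the loadBefore() code in the FileStorage.py
diff below.  The Data.fs path and the use of lastTransaction() to get a
cutoff tid are illustrative only.)

    from ZODB.FileStorage import FileStorage
    from ZODB.utils import z64

    st = FileStorage('Data.fs', read_only=True)
    # Revision of the root object (oid z64) that was current just before
    # the last committed transaction, or None if no such revision exists.
    r = st.loadBefore(z64, st.lastTransaction())
    if r is not None:
        data, start_tid, end_tid = r
        # start_tid wrote this revision; end_tid is the tid of the
        # transaction that superseded it, or None if it is still current.
    st.close()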

A few tests fail, but it's close enough to share the code with Tim.

In all cases, the _p_serial attribute of a Persistent object matches
the id of the transaction that wrote the current revision.  Within the
storage API, we eliminate the abortVersion case where the txn id is
greater than the serial number.
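
(For concreteness, a tiny sketch of that invariant at the application
level; not part of the checkin.  It assumes the ZODB3-era
get_transaction() builtin and a throwaway storage path, and uses
getTid(), the renamed getSerial() from the diff below.)

    from ZODB import DB
    from ZODB.FileStorage import FileStorage

    storage = FileStorage('/tmp/tid-demo.fs', create=True)
    db = DB(storage)
    conn = db.open()
    root = conn.root()
    root['x'] = 1                   # mark the root object as changed
    get_transaction().commit()
    # The object's _p_serial is exactly the tid of the transaction that
    # wrote its current revision, which is what the storage records too.
    assert root._p_serial == storage.getTid(root._p_oid)
    db.close()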

Rename as many variables and attributes as possible from serial to
tid, with the exception of the store() and restore() arguments.  They
are passed in from a client, which gets the value from _p_serial, so
the old name makes some sense there.

When tid and serial were both returned, eliminate serial and just use
tid.  
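
(Concretely, and again not part of the checkin; the method names come
from the diffs below, and these lines only sketch the calling convention.)

    # FileStorageFormatter._loadBackTxn() in format.py:
    #     old return value: (data, serial, tid)
    #     new return value: (data, tid)
    # The version-aware load path in FileStorage.py similarly drops the
    # serial, returning (data, tid, version) instead of
    # (data, serial, tid, version).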

Replace "serial" with "tid" in some methods names, but not all.  I'll
get to the rest tomorrow.  The remaining ones are just used a lot.
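
(Not part of the checkin either; just a quick summary of the renames
already visible in the diffs below, with call signatures assumed from
the existing code, and storage/oid as in the sketches above.)

    tid = storage.getTid(oid)     # was getSerial(); tid of the current record
    tid = storage.lastTid(oid)    # was lastSerial(); returns None for new oids
    for d in storage.history(oid, version='', size=1):
        print d["tid"], d["user_name"]   # entries are keyed "tid" now, not "serial"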

Add an XXX comment about the now-bogus ZEO protocol that passes
identical serialnos for every object committed.


=== ZODB3/ZODB/FileStorage/FileStorage.py 1.1.2.18 => 1.1.2.19 ===
--- ZODB3/ZODB/FileStorage/FileStorage.py:1.1.2.18	Mon Dec  1 10:16:10 2003
+++ ZODB3/ZODB/FileStorage/FileStorage.py	Tue Dec  2 02:10:31 2003
@@ -35,7 +35,7 @@
 from ZODB.TimeStamp import TimeStamp
 from ZODB.lock_file import LockFile
 from ZODB.utils import p64, u64, cp, z64
-from ZODB.fspack import FileStoragePacker
+from ZODB.FileStorage.fspack import FileStoragePacker
 from ZODB.FileStorage.format \
      import FileStorageFormatter, DataHeader, TxnHeader, DATA_HDR, \
      DATA_HDR_LEN, TRANS_HDR, TRANS_HDR_LEN, CorruptedDataError, \
@@ -112,7 +112,7 @@
 
     _records_before_save = 10000
 
-    def __init__(self, file_name, create=0, read_only=0, stop=None,
+    def __init__(self, file_name, create=False, read_only=False, stop=None,
                  quota=None):
 
         if read_only:
@@ -139,9 +139,9 @@
         BaseStorage.BaseStorage.__init__(self, file_name)
 
         (index, vindex, tindex, tvindex,
-         oid2serial, toid2serial, toid2serial_delete) = self._newIndexes()
+         oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
         self._initIndex(index, vindex, tindex, tvindex,
-                        oid2serial, toid2serial, toid2serial_delete)
+                        oid2tid, toid2tid, toid2tid_delete)
 
         # Now open the file
 
@@ -176,7 +176,7 @@
             index, vindex, start, maxoid, ltid = r
 
             self._initIndex(index, vindex, tindex, tvindex,
-                            oid2serial, toid2serial, toid2serial_delete)
+                            oid2tid, toid2tid, toid2tid_delete)
             self._pos, self._oid, tid = read_index(
                 self._file, file_name, index, vindex, tindex, stop,
                 ltid=ltid, start=start, maxoid=maxoid,
@@ -209,11 +209,11 @@
 
         self._quota = quota
 
-        # Serialno cache statistics.
-        self._oid2serial_nlookups = self._oid2serial_nhits = 0
+        # tid cache statistics.
+        self._oid2tid_nlookups = self._oid2tid_nhits = 0
 
     def _initIndex(self, index, vindex, tindex, tvindex,
-                   oid2serial, toid2serial, toid2serial_delete):
+                   oid2tid, toid2tid, toid2tid_delete):
         self._index=index
         self._vindex=vindex
         self._tindex=tindex
@@ -221,26 +221,26 @@
         self._index_get=index.get
         self._vindex_get=vindex.get
 
-        # .store() needs to compare the passed-in serial to the current
-        # serial in the database.  _oid2serial caches the oid -> current
-        # serial mapping for non-version data (if the current record for
-        # oid is version data, the oid is not a key in _oid2serial).
-        # The point is that otherwise seeking into the storage is needed
-        # to extract the current serial, and that's an expensive operation.
-        # For example, if a transaction stores 4000 objects, and each
-        # random seek + read takes 7ms (that was approximately true on
-        # Linux and Windows tests in mid-2003), that's 28 seconds just to
-        # find the old serials.
+        # .store() needs to compare the passed-in serial to the
+        # current tid in the database.  _oid2tid caches the oid ->
+        # current tid mapping for non-version data (if the current
+        # record for oid is version data, the oid is not a key in
+        # _oid2tid).  The point is that otherwise seeking into the
+        # storage is needed to extract the current tid, and that's
+        # an expensive operation.  For example, if a transaction
+        # stores 4000 objects, and each random seek + read takes 7ms
+        # (that was approximately true on Linux and Windows tests in
+        # mid-2003), that's 28 seconds just to find the old tids.
         # XXX Probably better to junk this and redefine _index as mapping
-        # XXX oid to (offset, serialno) pair, via a new memory-efficient
+        # XXX oid to (offset, tid) pair, via a new memory-efficient
         # XXX BTree type.
-        self._oid2serial = oid2serial
-        # oid->serialno map to transactionally add to _oid2serial.
-        self._toid2serial = toid2serial
-        # Set of oids to transactionally delete from _oid2serial (e.g.,
+        self._oid2tid = oid2tid
+        # oid->tid map to transactionally add to _oid2tid.
+        self._toid2tid = toid2tid
+        # Set of oids to transactionally delete from _oid2tid (e.g.,
         # oids reverted by undo, or for which the most recent record
         # becomes version data).
-        self._toid2serial_delete = toid2serial_delete
+        self._toid2tid_delete = toid2tid_delete
 
     def __len__(self):
         return len(self._index)
@@ -409,28 +409,28 @@
             LOG("ZODB FS", ERROR, "Error saving index on close()",
                 error=sys.exc_info())
 
-    # Return serial number of most recent record for oid if that's in
-    # the _oid2serial cache.  Else return None.  It's important to use
-    # this instead of indexing _oid2serial directly so that cache
-    # statistics can be logged.
-    def _get_cached_serial(self, oid):
-        self._oid2serial_nlookups += 1
-        result = self._oid2serial.get(oid)
+    # Return tid of most recent record for oid if that's in the
+    # _oid2tid cache.  Else return None.  It's important to use this
+    # instead of indexing _oid2tid directly so that cache statistics
+    # can be logged.
+    def _get_cached_tid(self, oid):
+        self._oid2tid_nlookups += 1
+        result = self._oid2tid.get(oid)
         if result is not None:
-            self._oid2serial_nhits += 1
+            self._oid2tid_nhits += 1
 
         # Log a msg every ~8000 tries, and prevent overflow.
-        if self._oid2serial_nlookups & 0x1fff == 0:
-            if self._oid2serial_nlookups >> 30:
+        if self._oid2tid_nlookups & 0x1fff == 0:
+            if self._oid2tid_nlookups >> 30:
                 # In older Pythons, we may overflow if we keep it an int.
-                self._oid2serial_nlookups = long(self._oid2serial_nlookups)
-                self._oid2serial_nhits = long(self._oid2serial_nhits)
-            blather("_oid2serial size %s lookups %s hits %s rate %.1f%%",
-                    len(self._oid2serial),
-                    self._oid2serial_nlookups,
-                    self._oid2serial_nhits,
-                    100.0 * self._oid2serial_nhits /
-                            self._oid2serial_nlookups)
+                self._oid2tid_nlookups = long(self._oid2tid_nlookups)
+                self._oid2tid_nhits = long(self._oid2tid_nhits)
+            blather("_oid2tid size %s lookups %s hits %s rate %.1f%%",
+                    len(self._oid2tid),
+                    self._oid2tid_nlookups,
+                    self._oid2tid_nhits,
+                    100.0 * self._oid2tid_nhits /
+                            self._oid2tid_nlookups)
 
         return result
 
@@ -481,27 +481,15 @@
         here = self._pos + (self._tfile.tell() + self._thl)
         oids = []
         current_oids = {}
-        if not abort:
-            newserial = self._serial
 
         while srcpos:
             h = self._read_data_header(srcpos)
-            if abort:
-                # If we are aborting, the serialno in the new data
-                # record should be the same as the serialno in the last
-                # non-version data record.
-                # XXX This might be the only time that the serialno
-                # of a data record does not match the transaction id.
-                h_pnv = self._read_data_header(h.pnv)
-                serial = h_pnv.serial
-            else:
-                serial = newserial
 
             if self._index.get(h.oid) == srcpos:
                 # This is a current record!
                 self._tindex[h.oid] = here
                 oids.append(h.oid)
-                self._tfile.write(h.oid + serial + spos + middle)
+                self._tfile.write(h.oid + self._tid + spos + middle)
                 if dest:
                     self._tvindex[dest] = here
                     self._tfile.write(p64(h.pnv) + sd + dest)
@@ -520,8 +508,8 @@
 
             srcpos = h.vprev
             spos = p64(srcpos)
-        self._toid2serial_delete.update(current_oids)
-        return self._serial, oids
+        self._toid2tid_delete.update(current_oids)
+        return self._tid, oids
 
     def getSize(self):
         return self._pos
@@ -542,25 +530,23 @@
             pos = self._lookup_pos(oid)
             h = self._read_data_header(pos, oid)
             if h.version and h.version != version:
-                # Return data and serial from pnv (non-version data).
+                # Return data and tid from pnv (non-version data).
 
                 # If we return the old record's transaction id, then
                 # it will look to the cache like old data is current.
                 # The tid for the current data must always be greater
                 # than any non-current data.
-                data, serial, _, _ = self._loadBack_impl(oid, h.pnv)
-                th = self._read_txn_header(h.tloc)
-                return data, serial, th.tid, ""
+                data = self._loadBack_impl(oid, h.pnv)[0]
+                return data, h.tid, ""
             if h.plen:
                 data = self._file.read(h.plen)
-                th = self._read_txn_header(h.tloc)
-                return data, h.serial, th.tid, h.version
+                return data, h.tid, h.version
             else:
-                # Get the data from the backpointer, but tid and serial
-                # from currnt txn.
+                # Get the data from the backpointer, but tid from
+                # current txn.
                 data, _, _, _ = self._loadBack_impl(oid, h.back)
                 th = self._read_txn_header(h.tloc)
-                return data, h.serial, th.tid, h.version
+                return data, h.tid, h.version
         finally:
             self._lock_release()
 
@@ -570,13 +556,13 @@
             pos = self._lookup_pos(oid)
             h = self._read_data_header(pos, oid)
             if h.version and h.version != version:
-                data, serial, _, _ = self._loadBack_impl(oid, h.pnv)
-                return data, serial
+                data = self._loadBack_impl(oid, h.pnv)[0]
+                return data, h.tid
             if h.plen:
-                return self._file.read(h.plen), h.serial
+                return self._file.read(h.plen), h.tid
             else:
                 data = self._loadBack_impl(oid, h.back)[0]
-                return data, h.serial
+                return data, h.tid
         finally:
             self._lock_release()
 
@@ -586,7 +572,7 @@
             pos = self._lookup_pos(oid)
             while 1:
                 h = self._read_data_header(pos, oid)
-                if h.serial == serial:
+                if h.tid == serial:
                     break
                 pos = h.prev
                 if not pos:
@@ -598,53 +584,35 @@
         finally:
             self._lock_release()
 
-    def loadNonCurrent(self, oid, tid):
+    def loadBefore(self, oid, tid):
         pos = self._lookup_pos(oid)
         end_tid = None
         while True:
             h = self._read_data_header(pos, oid)
-            # Is this the first data record written before tid?
-            #
-            # In most cases, we can just use the serial number which
-            # is more efficient.  In the presence of abort version,
-            # the record's serial number will be the tid of the old
-            # non-version txn.  So we can only use the serial test
-            # if there is no backpointer; abortVersion always writes
-            # a backpointer.
-
-            # The logic is a bit complicated, so explicitly track
-            # whether we need to read the txn header.
-            th = None
-            # XXX Should do it this way: If there is version data,
-            # just following the backpointer to the non-version data.
-            if not h.version:
-                if not h.back:
-                    if h.serial < tid:
-                        break
-                else:
-                    th = self._read_txn_header(h.tloc)
-                    if th.tid < tid:
-                        break
-            if th is None:
-                th = self._read_txn_header(h.tloc)
+            if h.version:
+                # Just follow the pnv pointer to the previous
+                # non-version data.
+                if not h.pnv:
+                    # Object was created in version.  There is no
+                    # before data to find.
+                    return None
+                pos = h.pnv
+                end_tid = h.tid
+                continue
+
+            if h.tid < tid:
+                break
+            
             pos = h.prev
-            end_tid = th.tid
+            end_tid = h.tid
             if not pos:
                 return None
+            
         if h.back:
-            # _loadBack() will return the serialno and tid of the txn
-            # pointed to, which is wrong.  Only use it for the data.
-            th = self._read_txn_header(h.tloc)
-            assert th.tid < tid
-            data, serial, _, _ = self._loadBack_impl(oid, h.back)
-            return data, serial, th.tid, end_tid
+            data, _, _, _ = self._loadBack_impl(oid, h.back)
+            return data, h.tid, end_tid
         else:
-            # If the transaction wrote new data, then it must have been
-            # written by a regular store() and its tid will be the same
-            # as its serialno.
-
-            # XXX Or does restore() complicate this argument?
-            return self._file.read(h.plen), h.serial, h.serial, end_tid
+            return self._file.read(h.plen), h.tid, end_tid
 
     def modifiedInVersion(self, oid):
         self._lock_acquire()
@@ -664,29 +632,29 @@
         self._lock_acquire()
         try:
             old = self._index_get(oid, 0)
-            cached_serial = None
+            cached_tid = None
             pnv = None
             if old:
-                cached_serial = self._get_cached_serial(oid)
-                if cached_serial is None:
+                cached_tid = self._get_cached_tid(oid)
+                if cached_tid is None:
                     h = self._read_data_header(old, oid)
                     if h.version:
                         if h.version != version:
                             raise VersionLockError(oid, h.version)
                         pnv = h.pnv
-                    cached_serial = h.serial
+                    cached_tid = h.tid
 
-                if serial != cached_serial:
-                    data = self.tryToResolveConflict(oid, cached_serial,
+                if serial != cached_tid:
+                    data = self.tryToResolveConflict(oid, cached_tid,
                                                      serial, data)
                     if data is None:
                         raise POSException.ConflictError(
-                            oid=oid, serials=(cached_serial, serial))
+                            oid=oid, serials=(cached_tid, serial))
 
             pos = self._pos
             here = pos + self._tfile.tell() + self._thl
             self._tindex[oid] = here
-            new = DataHeader(oid, self._serial, old, pos, len(version),
+            new = DataHeader(oid, self._tid, old, pos, len(version),
                              len(data))
 
             if version:
@@ -697,9 +665,9 @@
                     pnv = old
                 new.setVersion(version, pnv, pv)
                 self._tvindex[version] = here
-                self._toid2serial_delete[oid] = 1
+                self._toid2tid_delete[oid] = 1
             else:
-                self._toid2serial[oid] = self._serial
+                self._toid2tid[oid] = self._tid
 
             self._tfile.write(new.asString())
             self._tfile.write(data)
@@ -709,10 +677,10 @@
                 raise FileStorageQuotaError(
                     "The storage quota has been exceeded.")
 
-            if old and serial != cached_serial:
+            if old and serial != cached_tid:
                 return ConflictResolution.ResolvedSerial
             else:
-                return self._serial
+                return self._tid
 
         finally:
             self._lock_release()
@@ -754,7 +722,7 @@
         # differences:
         #
         # - serial is the serial number of /this/ revision, not of the
-        #   previous revision.  It is used instead of self._serial, which is
+        #   previous revision.  It is used instead of self._tid, which is
         #   ignored.
         #
         # - Nothing is returned
@@ -804,9 +772,9 @@
                     vprev = self._vindex.get(version, 0)
                 new.setVersion(version, pnv, vprev)
                 self._tvindex[version] = here
-                self._toid2serial_delete[oid] = 1
+                self._toid2tid_delete[oid] = 1
             else:
-                self._toid2serial[oid] = serial
+                self._toid2tid[oid] = serial
 
             self._tfile.write(new.asString())
 
@@ -855,8 +823,8 @@
     def _clear_temp(self):
         self._tindex.clear()
         self._tvindex.clear()
-        self._toid2serial.clear()
-        self._toid2serial_delete.clear()
+        self._toid2tid.clear()
+        self._toid2tid_delete.clear()
         if self._tfile is not None:
             self._tfile.seek(0)
 
@@ -890,7 +858,7 @@
             tl = self._thl + dlen
 
             try:
-                h = TxnHeader(self._serial, tl, "c", len(user),
+                h = TxnHeader(self._tid, tl, "c", len(user),
                               len(descr), len(ext))
                 h.user = user
                 h.descr = descr
@@ -927,10 +895,10 @@
 
             self._index.update(self._tindex)
             self._vindex.update(self._tvindex)
-            self._oid2serial.update(self._toid2serial)
-            for oid in self._toid2serial_delete.keys():
+            self._oid2tid.update(self._toid2tid)
+            for oid in self._toid2tid_delete.keys():
                 try:
-                    del self._oid2serial[oid]
+                    del self._oid2tid[oid]
                 except KeyError:
                     pass
 
@@ -954,7 +922,7 @@
         return 1
 
     def _undoDataInfo(self, oid, pos, tpos):
-        """Return the serial, data pointer, data, and version for the oid
+        """Return the tid, data pointer, data, and version for the oid
         record at pos"""
         if tpos:
             pos = tpos - self._pos - self._thl
@@ -976,20 +944,20 @@
         if tpos:
             self._tfile.seek(tpos) # Restore temp file to end
 
-        return h.serial, pos, data, h.version
+        return h.tid, pos, data, h.version
 
-    def getSerial(self, oid):
+    def getTid(self, oid):
         self._lock_acquire()
         try:
-            result = self._get_cached_serial(oid)
+            result = self._get_cached_tid(oid)
             if result is None:
                 pos = self._lookup_pos(oid)
-                result = self._getSerial(oid, pos)
+                result = self._getTid(oid, pos)
             return result
         finally:
             self._lock_release()
 
-    def _getSerial(self, oid, pos):
+    def _getTid(self, oid, pos):
         self._file.seek(pos)
         h = self._file.read(16)
         assert oid == h[:8]
@@ -1002,7 +970,7 @@
         else:
             return "", None
 
-    def _transactionalUndoRecord(self, oid, pos, serial, pre, version):
+    def _transactionalUndoRecord(self, oid, pos, tid, pre, version):
         """Get the indo information for a data record
 
         Return a 5-tuple consisting of a pickle, data pointer,
@@ -1021,8 +989,7 @@
         if tipos != pos:
             # Eek, a later transaction modified the data, but,
             # maybe it is pointing at the same data we are.
-            cserial, cdataptr, cdata, cver = self._undoDataInfo(
-                oid, ipos, tpos)
+            ctid, cdataptr, cdata, cver = self._undoDataInfo(oid, ipos, tpos)
             # Versions of undone record and current record *must* match!
             if cver != version:
                 raise UndoError('Current and undone versions differ', oid)
@@ -1061,12 +1028,11 @@
             return "", pre, version, snv, ipos
 
         try:
-            # returns data, serial tuple
             bdata = self._loadBack_impl(oid, pre)[0]
         except KeyError:
             # couldn't find oid; what's the real explanation for this?
             raise UndoError("_loadBack() failed for %s", oid)
-        data = self.tryToResolveConflict(oid, cserial, serial, bdata, cdata)
+        data = self.tryToResolveConflict(oid, ctid, tid, bdata, cdata)
 
         if data:
             return data, 0, version, snv, ipos
@@ -1133,11 +1099,11 @@
         tpos = self._txn_find(tid, 1)
         tindex = self._txn_undo_write(tpos)
         self._tindex.update(tindex)
-        # Arrange to clear the affected oids from the oid2serial cache.
+        # Arrange to clear the affected oids from the oid2tid cache.
         # It's too painful to try to update them to correct current
         # values instead.
-        self._toid2serial_delete.update(tindex)
-        return self._serial, tindex.keys()
+        self._toid2tid_delete.update(tindex)
+        return self._tid, tindex.keys()
 
     def _txn_find(self, tid, stop_at_pack):
         pos = self._pos
@@ -1181,12 +1147,12 @@
                                                        self._tfile.tell())
             try:
                 p, prev, v, snv, ipos = self._transactionalUndoRecord(
-                    h.oid, pos, h.serial, h.prev, h.version)
+                    h.oid, pos, h.tid, h.prev, h.version)
             except UndoError, v:
                 # Don't fail right away. We may be redeemed later!
                 failures[h.oid] = v
             else:
-                new = DataHeader(h.oid, self._serial, ipos, otloc, len(v),
+                new = DataHeader(h.oid, self._tid, ipos, otloc, len(v),
                                  len(p))
                 if v:
                     vprev = self._tvindex.get(v, 0) or self._vindex.get(v, 0)
@@ -1294,10 +1260,10 @@
                 else:
                     d = {}
 
-                d.update({"time": TimeStamp(h.serial).timeTime(),
+                d.update({"time": TimeStamp(h.tid).timeTime(),
                           "user_name": user_name,
                           "description": description,
-                          "serial": h.serial,
+                          "tid": h.tid,
                           "version": h.version,
                           "size": h.plen,
                           })
@@ -1372,8 +1338,8 @@
                 os.rename(self._file_name + '.pack', self._file_name)
                 self._file = open(self._file_name, 'r+b')
                 self._initIndex(p.index, p.vindex, p.tindex, p.tvindex,
-                                p.oid2serial, p.toid2serial,
-                                p.toid2serial_delete)
+                                p.oid2tid, p.toid2tid,
+                                p.toid2tid_delete)
                 self._pos = opos
                 self._save_index()
             finally:
@@ -1392,14 +1358,14 @@
         """Return transaction id for last committed transaction"""
         return self._ltid
 
-    def lastSerial(self, oid):
+    def lastTid(self, oid):
         """Return last serialno committed for object oid.
 
         If there is no serialno for this oid -- which can only occur
         if it is a new object -- return None.
         """
         try:
-            return self.getSerial(oid)
+            return self.getTid(oid)
         except KeyError:
             return None
 
@@ -1978,12 +1944,12 @@
                     # instead of a pickle to indicate this.
                     data = None
                 else:
-                    data, _s, tid = self._loadBackTxn(h.oid, h.back, False)
+                    data, tid = self._loadBackTxn(h.oid, h.back, False)
                     # XXX looks like this only goes one link back, should
                     # it go to the original data like BDBFullStorage?
                     prev_txn = self.getTxnFromData(h.oid, h.back)
 
-            r = Record(h.oid, h.serial, h.version, data, prev_txn)
+            r = Record(h.oid, h.tid, h.version, data, prev_txn)
 
             return r
 
@@ -1992,7 +1958,7 @@
 class Record(BaseStorage.DataRecord):
     """An abstract database record."""
     def __init__(self, *args):
-        self.oid, self.serial, self.version, self.data, self.data_txn = args
+        self.oid, self.tid, self.version, self.data, self.data_txn = args
 
 class UndoSearch:
 


=== ZODB3/ZODB/FileStorage/format.py 1.1.2.4 => 1.1.2.5 ===
--- ZODB3/ZODB/FileStorage/format.py:1.1.2.4	Wed Nov 12 00:23:49 2003
+++ ZODB3/ZODB/FileStorage/format.py	Tue Dec  2 02:10:31 2003
@@ -49,8 +49,7 @@
 #
 #   - 8-byte oid.
 #
-#   - 8-byte serial, which is a type stamp that matches the
-#     transaction timestamp.
+#   - 8-byte tid, which matches the transaction id in the transaction record.
 #
 #   - 8-byte previous-record file-position.
 #
@@ -218,18 +217,14 @@
                 raise POSKeyError(oid)
             h = self._read_data_header(back)
             if h.plen:
-                return self._file.read(h.plen), h.serial, back, h.tloc
+                return self._file.read(h.plen), h.tid, back, h.tloc
             if h.back == 0 and not fail:
-                return None, h.serial, back, h.tloc
+                return None, h.tid, back, h.tloc
             back = h.back
 
     def _loadBackTxn(self, oid, back, fail=True):
-        """Return data, serial, and txn id for backpointer."""
-        data, serial, old, tloc = self._loadBack_impl(oid, back, fail)
-        self._file.seek(tloc)
-        h = self._file.read(TRANS_HDR_LEN)
-        tid = h[:8]
-        return data, serial, tid
+        """Return data and txn id for backpointer."""
+        return self._loadBack_impl(oid, back, fail)[:2]
 
     def _loadBackPOS(self, oid, back):
         return self._loadBack_impl(oid, back)[2]
@@ -237,9 +232,7 @@
     def getTxnFromData(self, oid, back):
         """Return transaction id for data at back."""
         h = self._read_data_header(back, oid)
-        self._file.seek(h.tloc)
-        # seek to transaction header, where tid is first 8 bytes
-        return self._file.read(8)
+        return h.tid
 
     def fail(self, pos, msg, *args):
         s = ("%s:%s:" + msg) % ((self._name, pos) + args)
@@ -281,22 +274,22 @@
     """Header for a data record."""
 
     __slots__ = (
-        "oid", "serial", "prev", "tloc", "vlen", "plen", "back",
+        "oid", "tid", "prev", "tloc", "vlen", "plen", "back",
         # These three attributes are only defined when vlen > 0
         "pnv", "vprev", "version")
 
-    def __init__(self, oid, serial, prev, tloc, vlen, plen):
+    def __init__(self, oid, tid, prev, tloc, vlen, plen):
         self.back = 0 # default
         self.version = "" # default
         self.oid = oid
-        self.serial = serial
+        self.tid = tid
         self.prev = prev
         self.tloc = tloc
         self.vlen = vlen
         self.plen = plen
 
     def asString(self):
-        s = struct.pack(DATA_HDR, self.oid, self.serial, self.prev,
+        s = struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
                         self.tloc, self.vlen, self.plen)
         if self.version:
             v = struct.pack(">QQ", self.pnv, self.vprev)


=== ZODB3/ZODB/FileStorage/fspack.py 1.1.2.1 => 1.1.2.2 ===
--- ZODB3/ZODB/FileStorage/fspack.py:1.1.2.1	Tue Oct  7 14:16:34 2003
+++ ZODB3/ZODB/FileStorage/fspack.py	Tue Dec  2 02:10:31 2003
@@ -36,11 +36,10 @@
 from ZODB.utils import p64, u64, z64, oid_repr
 from zLOG import LOG, BLATHER, WARNING, ERROR, PANIC
 
-try:
-    from ZODB.fsIndex import fsIndex
-except ImportError:
-    def fsIndex():
-        return {}
+from ZODB.fsIndex import fsIndex
+from ZODB.FileStorage.format \
+     import FileStorageFormatter, CorruptedDataError, DataHeader, \
+     TRANS_HDR_LEN
 
 class DataCopier(FileStorageFormatter):
     """Mixin class for copying transactions into a storage.
@@ -415,15 +414,15 @@
         # vindex: version -> pos of XXX
         # tindex: oid -> pos, for current txn
         # tvindex: version -> pos of XXX, for current txn
-        # oid2serial: not used by the packer
+        # oid2tid: not used by the packer
 
         self.index = fsIndex()
         self.vindex = {}
         self.tindex = {}
         self.tvindex = {}
-        self.oid2serial = {}
-        self.toid2serial = {}
-        self.toid2serial_delete = {}
+        self.oid2tid = {}
+        self.toid2tid = {}
+        self.toid2tid_delete = {}
 
         # Index for non-version data.  This is a temporary structure
         # to reduce I/O during packing
@@ -517,7 +516,7 @@
         """
         if back == 0:
             return None
-        data, serial, tid = self._loadBackTxn(oid, back, 0)
+        data, tid = self._loadBackTxn(oid, back, 0)
         return data
 
     def copyDataRecords(self, pos, th):
@@ -631,7 +630,7 @@
                 if h.back:
                     prev_txn = self.getTxnFromData(h.oid, h.back)
 
-            self._copier.copy(h.oid, h.serial, data, h.version,
+            self._copier.copy(h.oid, h.tid, data, h.version,
                               prev_txn, pos, self._tfile.tell())
 
         tlen = self._tfile.tell() - pos



