[Zope3-checkins] CVS: ZODB4/src/zodb/zeo/tests - connection.py:1.8.24.2

Jeremy Hylton <jeremy@zope.com>
Wed, 18 Jun 2003 14:51:28 -0400


Update of /cvs-repository/ZODB4/src/zodb/zeo/tests
In directory cvs.zope.org:/tmp/cvs-serv9754

Modified Files:
      Tag: ZODB3-2-merge
	connection.py 
Log Message:
Sync with latest changes from ZODB 3.2.

Remove NOcheckMultiStorageTransaction() since it wasn't run and the
new concurrent updates tests seem to cover the same ground.

Rename all the test methods from check... to test....
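
(For readers unfamiliar with the convention: the old check* names date from
suites built with a non-default method prefix, e.g.
unittest.makeSuite(SomeTests, 'check'); with the standard test* prefix,
unittest's default loader collects the methods on its own.  A minimal sketch,
not part of this checkin -- the class name and trivial method bodies below are
just illustrative:

    import unittest

    class ConnectionTests(unittest.TestCase):
        # Collected by the default loader because the name starts with "test".
        def testReadOnlyClient(self):
            pass

        # Ignored unless a loader is told to use the "check" prefix,
        # e.g. unittest.makeSuite(ConnectionTests, 'check').
        def checkReadOnlyClient(self):
            pass

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(ConnectionTests)
    print(suite.countTestCases())  # 1 -- only the test* method is collected
)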


=== ZODB4/src/zodb/zeo/tests/connection.py 1.8.24.1 => 1.8.24.2 ===
--- ZODB4/src/zodb/zeo/tests/connection.py:1.8.24.1	Wed Jun 18 11:49:27 2003
+++ ZODB4/src/zodb/zeo/tests/connection.py	Wed Jun 18 14:51:28 2003
@@ -30,6 +30,7 @@
 from zodb.zeo.tests.common import TestClientStorage, DummyDB
 
 from transaction import get_transaction
+from zodb.db import DB
 from zodb.ztransaction import Transaction
 from zodb.storage.interfaces import ReadOnlyError
 from zodb.storage.tests.base import StorageTestBase
@@ -222,7 +223,7 @@
     start and stop a ZEO storage server.
     """
 
-    def checkMultipleAddresses(self):
+    def testMultipleAddresses(self):
         for i in range(4):
             self._newAddr()
         self._storage = self.openClientStorage('test', 100000)
@@ -231,7 +232,7 @@
         self._dostore(oid, data=obj)
         self._storage.close()
 
-    def checkMultipleServers(self):
+    def testMultipleServers(self):
         # XXX crude test at first -- just start two servers and do a
         # commit at each one.
 
@@ -245,15 +246,18 @@
         # If we can still store after shutting down one of the
         # servers, we must be reconnecting to the other server.
 
+        did_a_store = False
         for i in range(10):
             try:
                 self._dostore()
+                did_a_store = True
                 break
             except ClientDisconnected:
                 time.sleep(0.5)
                 self._storage.sync()
+        self.assert_(did_a_store)
 
-    def checkReadOnlyClient(self):
+    def testReadOnlyClient(self):
         # Open a read-only client to a read-write server; stores fail
 
         # Start a read-only client for a read-write server
@@ -261,7 +265,7 @@
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReadOnlyServer(self):
+    def testReadOnlyServer(self):
         # Open a read-only client to a read-only *server*; stores fail
 
         # We don't want the read-write server created by setUp()
@@ -274,7 +278,7 @@
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReadOnlyFallbackWritable(self):
+    def testReadOnlyFallbackWritable(self):
         # Open a fallback client to a read-write server; stores succeed
 
         # Start a read-only-fallback client for a read-write server
@@ -282,7 +286,7 @@
         # Stores should succeed here
         self._dostore()
 
-    def checkReadOnlyFallbackReadOnlyServer(self):
+    def testReadOnlyFallbackReadOnlyServer(self):
         # Open a fallback client to a read-only *server*; stores fail
 
         # We don't want the read-write server created by setUp()
@@ -299,7 +303,7 @@
     # further down.  Is the code here hopelessly naive, or is
     # checkReconnection() overwrought?
 
-    def checkReconnectWritable(self):
+    def testReconnectWritable(self):
         # A read-write client reconnects to a read-write server
 
         # Start a client
@@ -322,7 +326,7 @@
         # Stores should succeed here
         self._dostore()
 
-    def checkDisconnectionError(self):
+    def testDisconnectionError(self):
         # Make sure we get a ClientDisconnected when we try to read an
         # object when we're not connected to a storage server and the
         # object is not in the cache.
@@ -331,7 +335,7 @@
         self.assertRaises(ClientDisconnected,
                           self._storage.load, 'fredwash', '')
 
-    def checkDisconnectedAbort(self):
+    def testDisconnectedAbort(self):
         self._storage = self.openClientStorage()
         self._dostore()
         oids = [self._storage.newObjectId() for i in range(5)]
@@ -347,7 +351,7 @@
         self._storage._wait()
         self._dostore()
 
-    def checkBasicPersistence(self):
+    def testBasicPersistence(self):
         # Verify cached data persists across client storage instances.
 
         # To verify that the cache is being used, the test closes the
@@ -366,7 +370,7 @@
         self.assertEqual(revid1, revid2)
         self._storage.close()
 
-    def checkRollover(self):
+    def testRollover(self):
         # Check that the cache works when the files are swapped.
 
         # In this case, only one object fits in a cache file.  When the
@@ -385,7 +389,7 @@
         self._storage.load(oid1, '')
         self._storage.load(oid2, '')
 
-    def checkReconnection(self):
+    def testReconnection(self):
         # Check that the client reconnects when a server restarts.
 
         # XXX Seem to get occasional errors that look like this:
@@ -419,11 +423,11 @@
             self.fail("Could not reconnect to server")
         self.logger.warn("checkReconnection: finished")
 
-    def checkBadMessage1(self):
+    def testBadMessage1(self):
         # not even close to a real message
         self._bad_message("salty")
 
-    def checkBadMessage2(self):
+    def testBadMessage2(self):
         # just like a real message, but with an unpicklable argument
         global Hack
         class Hack:
@@ -457,49 +461,44 @@
         self._storage = self.openClientStorage()
         self._dostore()
 
-    # Test case for multiple storages participating in a single
-    # transaction.  This is not really a connection test, but it needs
-    # about the same infrastructure (several storage servers).
-
-    # XXX WARNING: with the current ZEO code, this occasionally fails.
-    # That's the point of this test. :-)
-
-    def NOcheckMultiStorageTransaction(self):
-        # Configuration parameters (larger values mean more likely deadlocks)
-        N = 2
-        # These don't *have* to be all the same, but it's convenient this way
-        self.nservers = N
-        self.nthreads = N
-        self.ntrans = N
-        self.nobj = N
+    def testCrossDBInvalidations(self):
+        db1 = DB(self.openClientStorage())
+        c1 = db1.open()
+        r1 = c1.root()
 
-        # Start extra servers
-        for i in range(1, self.nservers):
-            self._newAddr()
-            self.startServer(index=i)
+        r1["a"] = MinPO("a")
+        get_transaction().commit()
 
-        # Spawn threads that each do some transactions on all storages
-        threads = []
-        try:
-            for i in range(self.nthreads):
-                t = MSTThread(self, "T%d" % i)
-                threads.append(t)
-                t.start()
-            # Wait for all threads to finish
-            for t in threads:
-                t.join(60)
-                self.failIf(t.isAlive(), "%s didn't die" % t.getName())
-        finally:
-            for t in threads:
-                t.closeclients()
+        db2 = DB(self.openClientStorage())
+        r2 = db2.open().root()
+
+        self.assertEqual(r2["a"].value, "a")
+
+        r2["b"] = MinPO("b")
+        get_transaction().commit()
+
+        # make sure the invalidation is received in the other client
+        for i in range(10):
+            c1._storage.sync()
+            if r1._p_oid in c1._invalidated:
+                break
+            time.sleep(0.1)
+        self.assert_(r1._p_oid in c1._invalidated)
+
+        # force the invalidations to be applied...
+        c1.sync()
+        r1.keys() # unghostify
+        self.assertEqual(r1._p_serial, r2._p_serial)
 
+        db2.close()
+        db1.close()
 
 class ReconnectionTests(CommonSetupTearDown):
     keep = True
     forker_admin_retries = 20
     invq = 2
 
-    def checkReadOnlyStorage(self):
+    def testReadOnlyStorage(self):
         # Open a read-only client to a read-only *storage*; stores fail
 
         # We don't want the read-write server created by setUp()
@@ -512,7 +511,7 @@
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReadOnlyFallbackReadOnlyStorage(self):
+    def testReadOnlyFallbackReadOnlyStorage(self):
         # Open a fallback client to a read-only *storage*; stores fail
 
         # We don't want the read-write server created by setUp()
@@ -525,7 +524,7 @@
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReconnectReadOnly(self):
+    def testReconnectReadOnly(self):
         # A read-only client reconnects from a read-write to a
         # read-only server
 
@@ -549,7 +548,7 @@
         # Stores should still fail
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReconnectFallback(self):
+    def testReconnectFallback(self):
         # A fallback client reconnects from a read-write to a
         # read-only server
 
@@ -573,7 +572,7 @@
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
 
-    def checkReconnectUpgrade(self):
+    def testReconnectUpgrade(self):
         # A fallback client reconnects from a read-only to a
         # read-write server
 
@@ -602,7 +601,7 @@
         # Stores should now succeed
         self._dostore()
 
-    def checkReconnectSwitch(self):
+    def testReconnectSwitch(self):
         # A fallback client initially connects to a read-only server,
         # then discovers a read-write server and switches to that
 
@@ -631,7 +630,7 @@
         else:
             self.fail("Couldn't store after starting a read-write server")
 
-    def checkNoVerificationOnServerRestart(self):
+    def testNoVerificationOnServerRestart(self):
         self._storage = self.openClientStorage()
         # When we create a new storage, it should always do a full
         # verification
@@ -646,7 +645,7 @@
         # should be needed.
         self.assertEqual(self._storage.verify_result, "no verification")
 
-    def checkNoVerificationOnServerRestartWith2Clients(self):
+    def testNoVerificationOnServerRestartWith2Clients(self):
         perstorage = self.openClientStorage(cache="test")
         self.assertEqual(perstorage.verify_result, "full verification")
 
@@ -676,7 +675,7 @@
         self.assertEqual(perstorage.verify_result, "no verification")
         perstorage.close()
 
-    def checkQuickVerificationWith2Clients(self):
+    def testQuickVerificationWith2Clients(self):
         perstorage = self.openClientStorage(cache="test")
         self.assertEqual(perstorage.verify_result, "full verification")
 
@@ -703,7 +702,7 @@
 
 
 
-    def checkVerificationWith2ClientsInvqOverflow(self):
+    def testVerificationWith2ClientsInvqOverflow(self):
         perstorage = self.openClientStorage(cache="test")
         self.assertEqual(perstorage.verify_result, "full verification")
 
@@ -741,7 +740,7 @@
 class TimeoutTests(CommonSetupTearDown):
     timeout = 1
 
-    def checkTimeout(self):
+    def testTimeout(self):
         storage = self.openClientStorage()
         txn = Transaction()
         storage.tpcBegin(txn)
@@ -749,14 +748,14 @@
         time.sleep(2)
         self.assertRaises(ClientDisconnected, storage.tpcFinish, txn)
 
-    def checkTimeoutOnAbort(self):
+    def testTimeoutOnAbort(self):
         storage = self.openClientStorage()
         txn = Transaction()
         storage.tpcBegin(txn)
         storage.tpcVote(txn)
         storage.tpcAbort(txn)
 
-    def checkTimeoutOnAbortNoLock(self):
+    def testTimeoutOnAbortNoLock(self):
         storage = self.openClientStorage()
         txn = Transaction()
         storage.tpcBegin(txn)