[Checkins] SVN: gocept.zeoraid/trunk/ merged and fixed tests for dirceu-addstoragetool branch
Thomas Lotze
tl at gocept.com
Thu Jan 8 04:27:20 EST 2009
Log message for revision 94613:
merged and fixed tests for dirceu-addstoragetool branch
Changed:
A gocept.zeoraid/trunk/doc/CMDLINE.txt
U gocept.zeoraid/trunk/src/gocept/zeoraid/scripts/controller.py
U gocept.zeoraid/trunk/src/gocept/zeoraid/storage.py
U gocept.zeoraid/trunk/src/gocept/zeoraid/tests/test_basics.py
-=-
Copied: gocept.zeoraid/trunk/doc/CMDLINE.txt (from rev 94612, gocept.zeoraid/branches/dirceu-addstoragetool/doc/CMDLINE.txt)
===================================================================
--- gocept.zeoraid/trunk/doc/CMDLINE.txt (rev 0)
+++ gocept.zeoraid/trunk/doc/CMDLINE.txt 2009-01-08 09:27:20 UTC (rev 94613)
@@ -0,0 +1,35 @@
+==============================================
+Using the ZEORaid controller command-line tool
+==============================================
+
+Usage: controller.py [options] <command> [command_options]
+
+Options:
+
+ -p port -- port to connect to (default is 8100)
+
+ -h host -- host to connect to (default is 127.0.0.1)
+
+ -S name -- storage name (default is '1')
+
+Commands:
+
+ status -- Print short status information
+
+ details -- Print detailed status information
+
+ recover <storage> -- Start recovery for a storage
+
+ disable <storage> -- Disable a storage
+
+ reload </path/to/zeo.conf> -- Reload a specified zeo.conf file
+
+recover
+-------
+Multiple recoveries can't happen in parallel (see KNOWNBUGS.txt for more info).
+
+reload
+------
+The reload command takes a zeo.conf file and compares the names of the storages described there and the names of the storages currently loaded to see if any storage needs to be added and/or removed. A new storage is added in the degraded state, so you need to manually 'recover' it, while a removed storage is 'disabled', not really removed.
+
+Example: Let's suppose we have a ZEORaid server connected with three storages called '1', '2' and '3' in the optimal state. Now we call 'reload' on a modified zeo.conf file that has the storages '1', '3', '4' and '5' - the storage '2' is disabled and the storages '4' and '5' are added as degraded storages. We now must 'recover' them, one at a time, before using them.
\ No newline at end of file
Modified: gocept.zeoraid/trunk/src/gocept/zeoraid/scripts/controller.py
===================================================================
--- gocept.zeoraid/trunk/src/gocept/zeoraid/scripts/controller.py 2009-01-08 09:09:28 UTC (rev 94612)
+++ gocept.zeoraid/trunk/src/gocept/zeoraid/scripts/controller.py 2009-01-08 09:27:20 UTC (rev 94613)
@@ -33,6 +33,8 @@
disable <storage> -- Disable a storage
+ reload </path/to/zeo.conf> -- Reload a specified zeo.conf file
+
"""
import optparse
@@ -71,12 +73,14 @@
def cmd_disable(self, storage):
print self.raid.raid_disable(storage)
+ def cmd_reload(self, path):
+ print self.raid.raid_reload(path)
def main(host="127.0.0.1", port=8100, storage="1"):
usage = "usage: %prog [options] command [command-options]"
description = ("Connect to a RAIDStorage on a ZEO server and perform "
"maintenance tasks. Available commands: status, details, "
- "recover <STORAGE>, disable <STORAGE>")
+ "recover <STORAGE>, disable <STORAGE>, reload </PATH/TO/ZEO.CONF>")
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option("-S", "--storage", default=storage,
Modified: gocept.zeoraid/trunk/src/gocept/zeoraid/storage.py
===================================================================
--- gocept.zeoraid/trunk/src/gocept/zeoraid/storage.py 2009-01-08 09:09:28 UTC (rev 94612)
+++ gocept.zeoraid/trunk/src/gocept/zeoraid/storage.py 2009-01-08 09:27:20 UTC (rev 94613)
@@ -34,6 +34,7 @@
import transaction
import transaction.interfaces
import ZODB.blob
+from ZEO.runzeo import ZEOOptions
import gocept.zeoraid.interfaces
import gocept.zeoraid.recovery
@@ -558,7 +559,8 @@
def getExtensionMethods(self):
# This method isn't officially part of the interface but it is supported.
methods = dict.fromkeys(
- ['raid_recover', 'raid_status', 'raid_disable', 'raid_details'])
+ ['raid_recover', 'raid_status', 'raid_disable', 'raid_details',
+ 'raid_reload'])
return methods
# IRAIDStorage
@@ -594,6 +596,23 @@
t.start()
return 'recovering %r' % (name,)
+ @ensure_open_storage
+ def raid_reload(self, path):
+ s = ""
+ options = ZEOOptions()
+ options.realize(['-C',path])
+ new_storages = dict([(o.name,o) for o in options.storages[0].config.storages])
+ storages_to_add = [(name, opener) for name, opener in new_storages.items() if name not in self.openers]
+ storages_to_remove = [(name, opener) for name, opener in self.openers.items() if name not in new_storages]
+ for name, opener in storages_to_remove:
+ self.raid_disable(name)
+ s += "removed %s\n" % name
+ for name, opener in storages_to_add:
+ self.openers[name] = opener
+ self.storages_degraded.append(name)
+ s += "added %s\n" % name
+ return s
+
# internal
def _open_storage(self, name):
Modified: gocept.zeoraid/trunk/src/gocept/zeoraid/tests/test_basics.py
===================================================================
--- gocept.zeoraid/trunk/src/gocept/zeoraid/tests/test_basics.py 2009-01-08 09:09:28 UTC (rev 94612)
+++ gocept.zeoraid/trunk/src/gocept/zeoraid/tests/test_basics.py 2009-01-08 09:27:20 UTC (rev 94613)
@@ -63,12 +63,13 @@
class ZEOOpener(object):
- def __init__(self, name, **kwargs):
+ def __init__(self, name, addr, **kwargs):
self.name = name
+ self.addr = addr
self.kwargs = kwargs or {}
def open(self, **kwargs):
- return ClientStorage(self.name, **self.kwargs)
+ return ClientStorage(self.addr, **self.kwargs)
class ZEOStorageBackendTests(StorageTestBase.StorageTestBase):
@@ -89,7 +90,7 @@
zconf, port)
self._pids.append(pid)
self._servers.append(adminaddr)
- self._storages.append(ZEOOpener(zport, storage='1',
+ self._storages.append(ZEOOpener(str(i), zport, storage='1',
min_disconnect_poll=0.5, wait=1,
wait_timeout=60))
self.open()
@@ -172,7 +173,7 @@
blob_dir = tempfile.mkdtemp()
self.temp_paths.append(blob_dir)
self._servers.append(adminaddr)
- self._storages.append(ZEOOpener(zport, storage='1',
+ self._storages.append(ZEOOpener(str(i), zport, storage='1',
cache_size=12,
blob_dir=blob_dir,
min_disconnect_poll=0.5, wait=1,
@@ -216,7 +217,7 @@
zconf, port)
self._pids.append(pid)
self._servers.append(adminaddr)
- self._storages.append(ZEOOpener(zport, storage='1',
+ self._storages.append(ZEOOpener(str(i), zport, storage='1',
cache_size=12,
blob_dir=blob_dir,
shared_blob_dir=True,
@@ -1205,7 +1206,8 @@
self.assertEquals(dict(raid_details=None,
raid_disable=None,
raid_recover=None,
- raid_status=None),
+ raid_status=None,
+ raid_reload=None),
methods)
def test_getExtensionMethods_degrading(self):
@@ -1453,10 +1455,82 @@
for x in xrange(self.backend_count)]))
+class ExtensionMethodsTests(ZEOStorageBackendTests):
+
+ def saveConfig(self, storages):
+ # create a config file and save it
+ file_contents = """\
+ %%import gocept.zeoraid
+ <zeo>
+ address 127.0.0.1:%s
+ </zeo>
+
+ <raidstorage main>
+ """ % get_port()
+
+ for count, storage in enumerate(storages):
+ file_contents += """\
+ <zeoclient %s>
+ server %s:%s
+ storage 1
+ </zeoclient>
+ """ % (count, self._servers[count][0], (self._servers[count][1]-1))
+
+ file_contents += """\
+ </raidstorage>
+ """
+
+ filename = tempfile.mktemp()
+ self._server_storage_files = [ ]
+ self._server_storage_files.append(filename)
+ f = open(filename, 'w')
+ f.write(file_contents)
+ f.close()
+ return filename
+
+ def test_reload_add(self):
+ # create and start a new ZEO server
+ port = get_port()
+ zconf = forker.ZEOConfig(('', port))
+ zport, adminaddr, pid, path = forker.start_zeo_server(self.getConfig(),
+ zconf, port)
+ self._pids.append(pid)
+ self._servers.append(adminaddr)
+ self._storages.append(ZEOOpener('6', zport, storage='1',
+ min_disconnect_poll=0.5, wait=1,
+ wait_timeout=60))
+
+ filename = self.saveConfig(self._storages)
+
+ # test if the new ZEO server is added as a storage
+ self.assertEquals(len(self._storage.openers), 5)
+ self._storage.raid_reload(filename)
+ self.assertEquals(len(self._storage.openers), 6)
+
+ # do a simple store to see if anything breaks
+ oid = self._storage.new_oid()
+ self._dostore(oid=oid)
+
+ def test_reload_remove(self):
+ # Remove the 4th storage
+ storages = [s for c,s in enumerate(self._storages) if c != 3]
+ filename = self.saveConfig(storages)
+
+ # test if the storage was removed (disabled, actually)
+ self.assertEquals(len(self._storage.storages_degraded), 0)
+ self._storage.raid_reload(filename)
+ self.assertEquals(len(self._storage.storages_degraded), 1)
+
+ # do a simple store to see if anything breaks
+ oid = self._storage.new_oid()
+ self._dostore(oid=oid)
+
+
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZEOReplicationStorageTests, "check"))
suite.addTest(unittest.makeSuite(FailingStorageTests))
suite.addTest(unittest.makeSuite(FailingStorageSharedBlobTests))
suite.addTest(unittest.makeSuite(LoggingStorageDistributedTests))
+ suite.addTest(unittest.makeSuite(ExtensionMethodsTests))
return suite
More information about the Checkins
mailing list