[Zodb-checkins] CVS: Zope3/src/ZEO - ClientStorage.py:1.122 StorageServer.py:1.110 monitor.py:1.6 runzeo.py:1.24

Gintautas Miliauskas gintas at pov.lt
Sun Apr 25 07:34:56 EDT 2004


Update of /cvs-repository/Zope3/src/ZEO
In directory cvs.zope.org:/tmp/cvs-serv23562

Modified Files:
	ClientStorage.py StorageServer.py monitor.py runzeo.py 
Log Message:
This should be the last nail in zLOG's coffin. A grep reveals no remaining
references to zLOG apart from a few in comments.

This definitely broke the log analyzers pretty badly.
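
For anyone reading the hunks below, the pattern is the same in every file:
zLOG.LOG(subsystem, severity, message, error=...) calls become calls on a
module-level logging.Logger, with the old per-process subsystem label kept
only as a parenthesised prefix on the message (the pid in most cases).
Restated outside the diff, the ClientStorage helper boils down to the
following sketch (the severity mapping in the comment is the one actually
used in these hunks):

    import logging
    import os

    from ZODB.loglevels import BLATHER   # ZODB's extra-verbose level

    logger = logging.getLogger('ZEO.ClientStorage')
    _pid = str(os.getpid())

    def log2(msg, level=logging.INFO, subsys=_pid, exc_info=False):
        # The zLOG "subsystem" label survives only as a message prefix; the
        # logger name now identifies the module, the pid tags the process.
        logger.log(level, "(%s) %s" % (subsys, msg), exc_info=exc_info)

    # zLOG severities map onto levels as follows in these hunks:
    #   ERROR -> logging.ERROR, PROBLEM -> logging.WARNING,
    #   INFO -> logging.INFO, BLATHER -> ZODB.loglevels.BLATHER
    log2("Timed out waiting for connection", level=logging.WARNING)
    log2("ClientDisconnected in tpc_abort() ignored", level=BLATHER)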


=== Zope3/src/ZEO/ClientStorage.py 1.121 => 1.122 ===
--- Zope3/src/ZEO/ClientStorage.py:1.121	Thu Feb 26 19:31:51 2004
+++ Zope3/src/ZEO/ClientStorage.py	Sun Apr 25 07:34:15 2004
@@ -25,6 +25,7 @@
 import threading
 import time
 import types
+import logging
 
 from ZEO import ServerStub
 from ZEO.cache import ClientCache
@@ -34,11 +35,15 @@
 from ZEO.zrpc.client import ConnectionManager
 
 from ZODB import POSException
+from ZODB.loglevels import BLATHER
 from persistent.TimeStamp import TimeStamp
-from zLOG import LOG, PROBLEM, INFO, BLATHER, ERROR
 
-def log2(type, msg, subsys="ZCS:%d" % os.getpid()):
-    LOG(subsys, type, msg)
+logger = logging.getLogger('ZEO.ClientStorage')
+_pid = str(os.getpid())
+
+def log2(msg, level=logging.INFO, subsys=_pid, exc_info=False):
+    message = "(%s) %s" % (subsys, msg)
+    logger.log(level, message, exc_info=exc_info)
 
 try:
     from ZODB.ConflictResolution import ResolvedSerial
@@ -177,7 +182,7 @@
         testConnection() and doAuth() for details).
         """
 
-        log2(INFO, "%s (pid=%d) created %s/%s for storage: %r" %
+        log2("%s (pid=%d) created %s/%s for storage: %r" %
              (self.__class__.__name__,
               os.getpid(),
               read_only and "RO" or "RW",
@@ -185,18 +190,17 @@
               storage))
 
         if debug:
-            log2(INFO, "ClientStorage(): debug argument is no longer used")
+            log2("ClientStorage(): debug argument is no longer used")
 
         # wait defaults to True, but wait_for_server_on_startup overrides
         # if not None
         if wait_for_server_on_startup is not None:
             if wait is not None and wait != wait_for_server_on_startup:
-                log2(PROBLEM,
-                     "ClientStorage(): conflicting values for wait and "
-                     "wait_for_server_on_startup; wait prevails")
+                log2("ClientStorage(): conflicting values for wait and "
+                     "wait_for_server_on_startup; wait prevails",
+                     level=logging.WARNING)
             else:
-                log2(INFO,
-                     "ClientStorage(): wait_for_server_on_startup "
+                log2("ClientStorage(): wait_for_server_on_startup "
                      "is deprecated; please use wait instead")
                 wait = wait_for_server_on_startup
         elif wait is None:
@@ -325,7 +329,7 @@
     def _wait(self, timeout=None):
         if timeout is not None:
             deadline = time.time() + timeout
-            log2(BLATHER, "Setting deadline to %f" % deadline)
+            log2("Setting deadline to %f" % deadline, level=BLATHER)
         else:
             deadline = None
         # Wait for a connection to be established.
@@ -341,9 +345,10 @@
                 if self._ready.isSet():
                     break
                 if timeout and time.time() > deadline:
-                    log2(PROBLEM, "Timed out waiting for connection")
+                    log2("Timed out waiting for connection",
+                         level=logging.WARNING)
                     break
-                log2(INFO, "Waiting for cache verification to finish")
+                log2("Waiting for cache verification to finish")
         else:
             self._wait_sync(deadline)
 
@@ -354,9 +359,9 @@
             if self._ready.isSet():
                 break
             if deadline and time.time() > deadline:
-                log2(PROBLEM, "Timed out waiting for connection")
+                log2("Timed out waiting for connection", level=logging.WARNING)
                 break
-            log2(INFO, "Waiting for cache verification to finish")
+            log2("Waiting for cache verification to finish")
             if self._connection is None:
                 # If the connection was closed while we were
                 # waiting for it to become ready, start over.
@@ -408,16 +413,15 @@
 
         module = get_module(protocol)
         if not module:
-            log2(PROBLEM, "%s: no such an auth protocol: %s" %
-                 (self.__class__.__name__, protocol))
+            log2("%s: no such an auth protocol: %s" %
+                 (self.__class__.__name__, protocol), level=logging.WARNING)
             return
 
         storage_class, client, db_class = module
 
         if not client:
-            log2(PROBLEM,
-                 "%s: %s isn't a valid protocol, must have a Client class" %
-                 (self.__class__.__name__, protocol))
+            log2("%s: %s isn't a valid protocol, must have a Client class" %
+                 (self.__class__.__name__, protocol), level=logging.WARNING)
             raise AuthError, "invalid protocol"
 
         c = client(stub)
@@ -446,20 +450,20 @@
         succeeding register() call is deemed an optimal match, and any
         exception raised by register() is passed through.
         """
-        log2(INFO, "Testing connection %r" % conn)
+        log2("Testing connection %r" % conn)
         # XXX Check the protocol version here?
         self._conn_is_read_only = 0
         stub = self.StorageServerStubClass(conn)
 
         auth = stub.getAuthProtocol()
-        log2(INFO, "Server authentication protocol %r" % auth)
+        log2("Server authentication protocol %r" % auth)
         if auth:
             skey = self.doAuth(auth, stub)
             if skey:
-                log2(INFO, "Client authentication successful")
+                log2("Client authentication successful")
                 conn.setSessionKey(skey)
             else:
-                log2(ERROR, "Authentication failed")
+                log2("Authentication failed", level=logging.ERROR)
                 raise AuthError, "Authentication failed"
 
         try:
@@ -468,7 +472,7 @@
         except POSException.ReadOnlyError:
             if not self._read_only_fallback:
                 raise
-            log2(INFO, "Got ReadOnlyError; trying again with read_only=1")
+            log2("Got ReadOnlyError; trying again with read_only=1")
             stub.register(str(self._storage), read_only=1)
             self._conn_is_read_only = 1
             return 0
@@ -499,16 +503,16 @@
         self._connection = conn
 
         if reconnect:
-            log2(INFO, "Reconnected to storage: %s" % self._server_addr)
+            log2("Reconnected to storage: %s" % self._server_addr)
         else:
-            log2(INFO, "Connected to storage: %s" % self._server_addr)
+            log2("Connected to storage: %s" % self._server_addr)
 
         stub = self.StorageServerStubClass(conn)
         self._oids = []
         self._info.update(stub.get_info())
         self.verify_cache(stub)
         if not conn.is_async():
-            log2(INFO, "Waiting for cache verification to finish")
+            log2("Waiting for cache verification to finish")
             self._wait_sync()
         self._handle_extensions()
 
@@ -532,7 +536,8 @@
             try:
                 canonical, aliases, addrs = socket.gethostbyaddr(host)
             except socket.error, err:
-                log2(BLATHER, "Error resolving host: %s (%s)" % (host, err))
+                log2("Error resolving host: %s (%s)" % (host, err),
+                     level=BLATHER)
                 canonical = host
             self._server_addr = str((canonical, addr[1]))
 
@@ -559,27 +564,26 @@
         if last_inval_tid is not None:
             ltid = server.lastTransaction()
             if ltid == last_inval_tid:
-                log2(INFO, "No verification necessary "
-                     "(last_inval_tid up-to-date)")
+                log2("No verification necessary (last_inval_tid up-to-date)")
                 self._server = server
                 self._ready.set()
                 return "no verification"
 
             # log some hints about last transaction
-            log2(INFO, "last inval tid: %r %s\n"
+            log2("last inval tid: %r %s\n"
                  % (last_inval_tid, tid2time(last_inval_tid)))
-            log2(INFO, "last transaction: %r %s" %
+            log2("last transaction: %r %s" %
                  (ltid, ltid and tid2time(ltid)))
 
             pair = server.getInvalidations(last_inval_tid)
             if pair is not None:
-                log2(INFO, "Recovering %d invalidations" % len(pair[1]))
+                log2("Recovering %d invalidations" % len(pair[1]))
                 self.invalidateTransaction(*pair)
                 self._server = server
                 self._ready.set()
                 return "quick verification"
 
-        log2(INFO, "Verifying cache")
+        log2("Verifying cache")
         # setup tempfile to hold zeoVerify results
         self._tfile = tempfile.TemporaryFile(suffix=".inv")
         self._pickler = cPickle.Pickler(self._tfile, 1)
@@ -607,7 +611,7 @@
         This is called by ConnectionManager when the connection is
         closed or when certain problems with the connection occur.
         """
-        log2(INFO, "Disconnected from storage: %s" % repr(self._server_addr))
+        log2("Disconnected from storage: %s" % repr(self._server_addr))
         self._connection = None
         self._ready.clear()
         self._server = disconnected_stub
@@ -934,7 +938,8 @@
             try:
                 self._server.tpc_abort(id(txn))
             except ClientDisconnected:
-                log2(BLATHER, 'ClientDisconnected in tpc_abort() ignored')
+                log2("ClientDisconnected in tpc_abort() ignored",
+                     level=BLATHER)
         finally:
             self._tbuf.clear()
             self._seriald.clear()
@@ -1088,11 +1093,11 @@
         self._process_invalidations(InvalidationLogIterator(f))
         f.close()
 
-        log2(INFO, "endVerify finishing")
+        log2("endVerify finishing")
         self._server = self._pending_server
         self._ready.set()
         self._pending_conn = None
-        log2(INFO, "endVerify finished")
+        log2("endVerify finished")
 
     def invalidateTransaction(self, tid, args):
         """Invalidate objects modified by tid."""
@@ -1102,8 +1107,8 @@
         finally:
             self._lock.release()
         if self._pickler is not None:
-            log2(BLATHER,
-                 "Transactional invalidation during cache verification")
+            log2("Transactional invalidation during cache verification",
+                 level=BLATHER)
             for t in args:
                 self._pickler.dump(t)
             return
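
A note on BLATHER, since it appears in the hunks above and below: it is a
numeric level imported from ZODB.loglevels rather than a stdlib constant.
A sketch of how such a level is typically wired up (the exact value and the
addLevelName registration are assumptions about ZODB.loglevels, not something
shown in this checkin):

    import logging

    # Assumed: a verbosity level between DEBUG (10) and INFO (20), registered
    # by name so formatted records read "BLATHER" rather than "Level 15".
    BLATHER = 15
    logging.addLevelName(BLATHER, "BLATHER")

    # It is just a number, so the usual threshold machinery applies; to see
    # BLATHER output from ZEO, lower the relevant logger's level:
    logging.getLogger('ZEO').setLevel(BLATHER)
    logging.getLogger('ZEO.ClientStorage').log(BLATHER, "cache chatter")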


=== Zope3/src/ZEO/StorageServer.py 1.109 => 1.110 ===
--- Zope3/src/ZEO/StorageServer.py:1.109	Wed Mar 31 22:56:59 2004
+++ Zope3/src/ZEO/StorageServer.py	Sun Apr 25 07:34:15 2004
@@ -26,6 +26,7 @@
 import sys
 import threading
 import time
+import logging
 
 import transaction
 
@@ -37,23 +38,30 @@
 from ZEO.zrpc.trigger import trigger
 from ZEO.Exceptions import AuthError
 
-import zLOG
 from ZODB.ConflictResolution import ResolvedSerial
 from ZODB.POSException import StorageError, StorageTransactionError
 from ZODB.POSException import TransactionError, ReadOnlyError, ConflictError
 from ZODB.serialize import referencesf
 from ZODB.utils import u64, oid_repr
+from ZODB.loglevels import BLATHER
 
-_label = "ZSS" # Default label used for logging.
+logger = logging.getLogger('ZEO.StorageServer')
+
+# XXX This used to say "ZSS", which is now implied in the logger name. Can
+#     this either be set to str(os.getpid()) (if that makes sense) or removed?
+_label = "" # default label used for logging.
 
 def set_label():
     """Internal helper to reset the logging label (e.g. after fork())."""
     global _label
-    _label = "ZSS:%s" % os.getpid()
+    _label = "%s" % os.getpid()
 
-def log(message, level=zLOG.INFO, label=None, error=None):
-    """Internal helper to log a message using zLOG."""
-    zLOG.LOG(label or _label, level, message, error=error)
+def log(message, level=logging.INFO, label=None, exc_info=False):
+    """Internal helper to log a message."""
+    label = label or _label
+    if label:
+        message = "(%s) %s" % (label, message)
+    logger.log(level, message, exc_info=exc_info)
 
 class StorageServerError(StorageError):
     """Error reported when an unpickleable exception is raised."""
@@ -131,8 +139,8 @@
         name = self.__class__.__name__
         return "<%s %X trans=%s s_trans=%s>" % (name, id(self), tid, stid)
 
-    def log(self, msg, level=zLOG.INFO, error=None):
-        zLOG.LOG(self.log_label, level, msg, error=error)
+    def log(self, msg, level=logging.INFO, exc_info=False):
+        log(msg, level=level, label=self.log_label, exc_info=exc_info)
 
     def setup_delegation(self):
         """Delegate several methods to the storage"""
@@ -164,7 +172,8 @@
             raise ReadOnlyError()
         if self.transaction is None:
             caller = sys._getframe().f_back.f_code.co_name
-            self.log("no current transaction: %s()" % caller, zLOG.PROBLEM)
+            self.log("no current transaction: %s()" % caller,
+                     level=logging.WARNING)
             if exc is not None:
                 raise exc(None, tid)
             else:
@@ -173,7 +182,7 @@
             caller = sys._getframe().f_back.f_code.co_name
             self.log("%s(%s) invalid; current transaction = %s" %
                      (caller, repr(tid), repr(self.transaction.id)),
-                     zLOG.PROBLEM)
+                     logging.WARNING)
             if exc is not None:
                 raise exc(self.transaction.id, tid)
             else:
@@ -412,7 +421,7 @@
             self.locked = 0
             self.timeout.end(self)
             self.stats.lock_time = None
-            self.log("Transaction released storage lock", zLOG.BLATHER)
+            self.log("Transaction released storage lock", BLATHER)
             # _handle_waiting() can start another transaction (by
             # restarting a waiting one) so must be done last
             self._handle_waiting()
@@ -494,13 +503,11 @@
             if isinstance(err, ConflictError):
                 self.stats.conflicts += 1
                 self.log("conflict error oid=%s msg=%s" %
-                         (oid_repr(oid), str(err)), zLOG.BLATHER)
+                         (oid_repr(oid), str(err)), BLATHER)
             if not isinstance(err, TransactionError):
                 # Unexpected errors are logged and passed to the client
-                exc_info = sys.exc_info()
-                self.log("store error: %s, %s" % exc_info[:2],
-                         zLOG.ERROR, error=exc_info)
-                del exc_info
+                self.log("store error: %s, %s" % sys.exc_info()[:2],
+                         logging.ERROR, exc_info=True)
             # Try to pickle the exception.  If it can't be pickled,
             # the RPC response would fail, so use something else.
             pickler = cPickle.Pickler()
@@ -509,7 +516,7 @@
                 pickler.dump(err, 1)
             except:
                 msg = "Couldn't pickle storage exception: %s" % repr(err)
-                self.log(msg, zLOG.ERROR)
+                self.log(msg, logging.ERROR)
                 err = StorageServerError(msg)
             # The exception is reported back as newserial for this oid
             newserial = err
@@ -518,7 +525,7 @@
                 self.invalidated.append((oid, version))
         if newserial == ResolvedSerial:
             self.stats.conflicts_resolved += 1
-            self.log("conflict resolved oid=%s" % oid_repr(oid), zLOG.BLATHER)
+            self.log("conflict resolved oid=%s" % oid_repr(oid), BLATHER)
         self.serials.append((oid, newserial))
         return err is None
 
@@ -572,7 +579,7 @@
                      "Clients waiting: %d." % len(self.storage._waiting))
             return d
         else:
-            self.log("Transaction acquired storage lock.", zLOG.BLATHER)
+            self.log("Transaction acquired storage lock.", BLATHER)
             return self._restart()
 
     def _restart(self, delay=None):
@@ -582,7 +589,7 @@
         else:
             template = "Preparing to commit transaction: %d objects, %d bytes"
         self.log(template % (self.txnlog.stores, self.txnlog.size()),
-                 level=zLOG.BLATHER)
+                 level=BLATHER)
         self._tpc_begin(self.transaction, self.tid, self.status)
         loads, loader = self.txnlog.get_loader()
         for i in range(loads):
@@ -615,7 +622,7 @@
             zeo_storage._restart(delay)
         except:
             self.log("Unexpected error handling waiting transaction",
-                     level=zLOG.WARNING, error=sys.exc_info())
+                     level=logging.WARNING, exc_info=True)
             zeo_storage.connection.close()
             return 0
         else:
@@ -794,7 +801,7 @@
             zstorage = self.ZEOStorageClass(self, self.read_only)
 
         c = self.ManagedServerConnectionClass(sock, addr, zstorage, self)
-        log("new connection %s: %s" % (addr, `c`))
+        log("new connection %s: %s" % (addr, repr(c)))
         return c
 
     def register_connection(self, storage_id, conn):
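
One behavioural difference worth calling out in the StorageServer hunks:
zLOG's error=sys.exc_info() keyword becomes exc_info=True, and the logging
package collects the traceback itself, which is why the explicit grab-and-
delete of exc_info around the "store error" message goes away. A standalone
illustration (the handler setup here is just for the example):

    import logging

    logger = logging.getLogger('ZEO.StorageServer')
    logger.addHandler(logging.StreamHandler())   # stderr, like runzeo's default
    logger.setLevel(logging.INFO)

    try:
        raise ValueError("unpicklable storage exception")
    except ValueError:
        # logging fetches sys.exc_info() on its own and appends the formatted
        # traceback to the record, so no reference to the traceback needs to
        # be kept in local variables.
        logger.log(logging.ERROR, "store error", exc_info=True)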


=== Zope3/src/ZEO/monitor.py 1.5 => 1.6 ===
--- Zope3/src/ZEO/monitor.py:1.5	Fri Nov 28 11:44:47 2003
+++ Zope3/src/ZEO/monitor.py	Sun Apr 25 07:34:15 2004
@@ -20,9 +20,9 @@
 import socket
 import time
 import types
+import logging
 
 import ZEO
-import zLOG
 
 class StorageStats:
     """Per-storage usage statistics."""
@@ -129,7 +129,8 @@
         else:
             self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
         self.set_reuse_addr()
-        zLOG.LOG("ZSM", zLOG.INFO, "monitor listening on %s" % repr(self.addr))
+        logger = logging.getLogger('ZEO.monitor')
+        logger.info("listening on %s", repr(self.addr))
         self.bind(self.addr)
         self.listen(5)
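
A small point about the monitor hunk: logger.info("listening on %s",
repr(self.addr)) hands the argument to logging rather than pre-formatting the
whole string, so the %-interpolation is deferred until a handler actually
accepts the record. For instance (names here are illustrative only):

    import logging

    logger = logging.getLogger('ZEO.monitor')
    logger.setLevel(logging.WARNING)     # INFO records are filtered out

    class Noisy:
        def __str__(self):
            raise AssertionError("only rendered if the record is emitted")

    # The level check short-circuits before the message is built, so
    # Noisy.__str__ is never called and no exception is raised.
    logger.info("listening on %s", Noisy())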
 


=== Zope3/src/ZEO/runzeo.py 1.23 => 1.24 ===
--- Zope3/src/ZEO/runzeo.py:1.23	Thu Apr 22 14:45:52 2004
+++ Zope3/src/ZEO/runzeo.py	Sun Apr 25 07:34:15 2004
@@ -36,17 +36,27 @@
 import sys
 import signal
 import socket
+import logging
 
 import ZConfig, ZConfig.datatypes
-import zLOG
 import ZEO
 from zdaemon.zdoptions import ZDOptions
 
+logger = logging.getLogger('ZEO.runzeo')
+_pid = str(os.getpid())
+
+def log(msg, level=logging.INFO, exc_info=False):
+    """Internal: generic logging function."""
+    message = "(%s) %s" % (_pid, msg)
+    logger.log(level, message, exc_info=exc_info)
+
+
 def parse_address(arg):
     # XXX Not part of the official ZConfig API
     obj = ZConfig.datatypes.SocketAddress(arg)
     return obj.family, obj.address
 
+
 class ZEOOptionsMixin:
 
     storages = None
@@ -129,7 +139,6 @@
         if self.options.config_logger is not None:
             return
         # No log file is configured; default to stderr.
-        import logging
         logger = logging.getLogger()
         handler = logging.StreamHandler()
         handler.setLevel(logging.INFO)
@@ -160,8 +169,8 @@
     def open_storages(self):
         self.storages = {}
         for opener in self.options.storages:
-            info("opening storage %r using %s"
-                 % (opener.name, opener.__class__.__name__))
+            log("opening storage %r using %s"
+                % (opener.name, opener.__class__.__name__))
             self.storages[opener.name] = opener.open()
 
     def setup_signals(self):
@@ -202,33 +211,31 @@
         ThreadedAsync.LoopCallback.loop()
 
     def handle_sigterm(self):
-        info("terminated by SIGTERM")
+        log("terminated by SIGTERM")
         sys.exit(0)
 
     def handle_sigint(self):
-        info("terminated by SIGINT")
+        log("terminated by SIGINT")
         sys.exit(0)
 
     def handle_sighup(self):
-        info("restarted by SIGHUP")
+        log("restarted by SIGHUP")
         sys.exit(1)
 
     def handle_sigusr2(self):
-        # This requires a modern zLOG (from Zope 2.6 or later); older
-        # zLOG packages don't have the initialize() method
-        info("reinitializing zLOG")
-        # XXX Shouldn't this be below with _log()?
-        import zLOG
-        zLOG.initialize()
-        info("reinitialized zLOG")
+        # XXX this used to reinitialize zLOG. How do I achieve
+        #     the same effect with Python's logging package?
+        #     Should we restart as with SIGHUP?
+        log("received SIGUSR2, but it was not handled!", level=logging.WARNING)
 
     def close_storages(self):
         for name, storage in self.storages.items():
-            info("closing storage %r" % name)
+            log("closing storage %r" % name)
             try:
                 storage.close()
             except: # Keep going
-                exception("failed to close storage %r" % name)
+                log("failed to close storage %r" % name,
+                    level=logging.ERROR, exc_info=True)
 
 
 # Signal names
@@ -255,43 +262,6 @@
             continue
         if k_startswith("SIG") and not k_startswith("SIG_"):
             signames[sig] = name
-
-
-# Log messages with various severities.
-# This uses zLOG, but the API is a simplified version of PEP 282
-
-def critical(msg):
-    """Log a critical message."""
-    _log(msg, zLOG.PANIC)
-
-def error(msg):
-    """Log an error message."""
-    _log(msg, zLOG.ERROR)
-
-def exception(msg):
-    """Log an exception (an error message with a traceback attached)."""
-    _log(msg, zLOG.ERROR, error=sys.exc_info())
-
-def warn(msg):
-    """Log a warning message."""
-    _log(msg, zLOG.PROBLEM)
-
-def info(msg):
-    """Log an informational message."""
-    _log(msg, zLOG.INFO)
-
-def debug(msg):
-    """Log a debugging message."""
-    _log(msg, zLOG.DEBUG)
-
-# XXX It would be nice if a program that extended this one (like
-# runzrs) could change the label.
-
-_label = "RUNZEO:%d" % os.getpid()
-
-def _log(msg, severity=zLOG.INFO, error=None):
-    """Internal: generic logging function."""
-    zLOG.LOG(_label, severity, msg, "", error)
 
 
 # Main program
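
On the SIGUSR2 question left open above: zLOG.initialize() was mostly used to
reopen the log file after external rotation. With the logging package the
closest equivalent I know of is to walk the configured handlers and reopen
any FileHandler streams; the sketch below is a suggestion, not something this
checkin (or the ZConfig logging setup) provides:

    import logging

    def reopen_log_files():
        # Rough stand-in for the old zLOG.initialize() on SIGUSR2: close and
        # reopen every FileHandler so a log file rotated out from under the
        # process is picked up again.
        for handler in logging.getLogger().handlers:
            if isinstance(handler, logging.FileHandler):
                handler.acquire()
                try:
                    handler.stream.close()
                    handler.stream = open(handler.baseFilename, handler.mode)
                finally:
                    handler.release()

handle_sigusr2() could then simply call reopen_log_files() instead of logging
the warning.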



