[Checkins] SVN: zope.rdb/tags/3.4.1/ - Adjust version on tag

Sidnei da Silva sidnei at enfoldsystems.com
Fri Oct 10 12:53:37 EDT 2008


Log message for revision 91998:
   - Adjust version on tag

Changed:
  A   zope.rdb/tags/3.4.1/
  D   zope.rdb/tags/3.4.1/CHANGES.txt
  A   zope.rdb/tags/3.4.1/CHANGES.txt
  U   zope.rdb/tags/3.4.1/setup.py
  D   zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py
  A   zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py
  D   zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py
  A   zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py
  U   zope.rdb/tags/3.4.1/src/zope/rdb/gadflyzcml.py
  D   zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py
  A   zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py
  U   zope.rdb/tags/3.4.1/src/zope/rdb/tests/test_gadflyrootdirective.py

-=-
Copied: zope.rdb/tags/3.4.1 (from rev 91891, zope.rdb/trunk)


Property changes on: zope.rdb/tags/3.4.1
___________________________________________________________________
Name: svn:ignore
   + bin
build
dist
lib
develop-eggs
eggs
parts
.installed.cfg

Name: svn:mergeinfo
   + 

Deleted: zope.rdb/tags/3.4.1/CHANGES.txt
===================================================================
--- zope.rdb/trunk/CHANGES.txt	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/CHANGES.txt	2008-10-10 16:53:36 UTC (rev 91998)
@@ -1,7 +0,0 @@
-Change History
-==============
-
-3.4.0 (2007/09/01)
-------------------
-
-Initial release as an independent package

Copied: zope.rdb/tags/3.4.1/CHANGES.txt (from rev 91991, zope.rdb/trunk/CHANGES.txt)
===================================================================
--- zope.rdb/tags/3.4.1/CHANGES.txt	                        (rev 0)
+++ zope.rdb/tags/3.4.1/CHANGES.txt	2008-10-10 16:53:36 UTC (rev 91998)
@@ -0,0 +1,19 @@
+Change History
+==============
+
+3.4.1 (2008/10/10)
+------------------
+
+- Remove body of DatabaseException, base Exception class already
+  provides the same functionality.
+
+- Use hashlib.md5 instead of md5.new if available. md5 module is
+  deprecated and will be removed in a future Python release.
+
+- Remove usage of 'as' as variable name. 'as' is a keyword in Python
+  2.6 and generates a SyntaxError.
+
+3.4.0 (2007/09/01)
+------------------
+
+- Initial release as an independent package

Modified: zope.rdb/tags/3.4.1/setup.py
===================================================================
--- zope.rdb/trunk/setup.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/setup.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -34,7 +34,7 @@
 
 setup(
     name='zope.rdb',
-    version='3.4.1dev',
+    version='3.4.1',
     url='http://pypi.python.org/pypi/zope.rdb',
     author='Zope Corporation and Contributors',
     author_email='zope3-dev at zope.org',

Deleted: zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py
===================================================================
--- zope.rdb/trunk/src/zope/rdb/gadfly/gfdb0.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -1,1412 +0,0 @@
-"""storage objects"""
-
-verbosity = 0
-
-import os
-
-# use whatever kjbuckets sqlsem is using
-#from sqlsem import kjbuckets, maketuple
-
-# error on checking of data integrity
-StorageError = "StorageError"
-
-# use md5 checksum (stub if md5 unavailable?)
-def checksum(string):
-    from md5 import new
-    return new(string).digest()
-
-def recursive_dump(data, prefix="["):
-    """for debugging"""
-    from types import StringType
-    if type(data) is StringType:
-        #print prefix, data
-        return
-    p2 = prefix+"["
-    try:
-        for x in data:
-            recursive_dump(x, p2)
-    except:
-        print prefix, data
-
-def checksum_dump(data, file):
-    """checksum and dump marshallable data to file"""
-    #print "checksum_dump", file
-    #recursive_dump(data)
-    from marshal import dumps, dump
-    #print "data\n",data
-    storage = dumps(data)
-    checkpair = (checksum(storage), storage)
-    dump(checkpair, file)
-
-def checksum_undump(file):
-    """undump marshallable data from file, checksum"""
-    from marshal import load, loads
-    checkpair = load(file)
-    (check, storage) = checkpair
-    if checksum(storage)!=check:
-        raise StorageError, "data load checksum fails"
-    data = loads(storage)
-    return data
-
-def backup_file(filename, backupname):
-    """backup file, if unopenable ignore"""
-    try:
-        f = open(filename, "rb")
-    except:
-        return
-    data = f.read()
-    f.close()
-    f = open(backupname, "wb")
-    f.write(data)
-    f.close()
-
-def del_file(filename):
-    """delete file, ignore errors"""
-    from os import unlink
-    try:
-        unlink(filename)
-    except:
-        pass
-
-class Database0:
-    """quick and dirty in core database representation."""
-
-    # db.log is not None == use db.log to log modifications
-
-    # set for verbose prints
-    verbose = verbosity
-
-    # set for read only copy
-    readonly = 0
-
-    # set for temp/scratch db copy semantics
-    is_scratch = 0
-
-    # set to add introspective tables
-    introspect = 1
-
-    def __init__(self, shadowing=None, log=None):
-        """dictionary of relations."""
-        verbose = self.verbose
-        self.shadowing = shadowing
-        self.log = log
-        self.touched = 0
-        if log:
-            self.is_scratch = log.is_scratch
-        if shadowing and not log:
-            raise ValueError, "shadowing db requires log"
-        if verbose:
-            print "Database0 init"
-            if log:
-                log.verbose = 1
-        if shadowing:
-            # shadow structures of shadowed db
-            self.rels = shadow_dict(shadowing.rels, Relation0.unshadow)
-            self.datadefs = shadow_dict(shadowing.datadefs)
-            self.indices = shadow_dict(shadowing.indices)
-        else:
-            self.rels = {}
-            self.datadefs = {}
-            self.indices = {}
-            if self.introspect:
-                self.set_introspection()
-
-    def set_introspection(self):
-        import gfintrospect
-        self["dual"] = gfintrospect.DualView()
-        self["__table_names__"] = gfintrospect.RelationsView()
-        self["__datadefs__"] = gfintrospect.DataDefsView()
-        self["__indices__"] = gfintrospect.IndicesView()
-        self["__columns__"] = gfintrospect.ColumnsView()
-        self["__indexcols__"] = gfintrospect.IndexAttsView()
-
-    def reshadow(self, db, dblog):
-        """(re)make self into shadow of db with dblog"""
-        self.shadowing = db
-        self.log = dblog
-        self.rels = shadow_dict(db.rels, Relation0.unshadow)
-        self.datadefs = shadow_dict(db.datadefs)
-        self.indices = shadow_dict(db.indices)
-
-    def clear(self):
-        """I'm not sure if database has circular structure, so this added"""
-        self.shadowing = None
-        self.log = None
-        self.rels = {}
-        self.datadefs = {}
-        self.indices = {}
-
-    def commit(self):
-        """commit shadowed changes"""
-        verbose = self.verbose
-        if self.shadowing and self.touched:
-            # log commit handled elsewhere
-            #log = self.log
-            #if log and not log.is_scratch:
-               #if verbose: print "committing log"
-               #self.log.commit(verbose)
-            if verbose: print "committing rels"
-            self.rels.commit(verbose)
-            if verbose: print "committing datadefs"
-            self.datadefs.commit(verbose)
-            if verbose: print "committing indices"
-            self.indices.commit(verbose)
-            st = self.shadowing.touched
-            if not st:
-                if verbose: "print setting touched", self.touched
-                self.shadowing.touched = self.touched
-            elif verbose:
-                print "shadowed database is touched"
-        elif verbose:
-            print "db0: commit on nonshadow instance"
-
-    def __setitem__(self, name, relation):
-        """bind a name (uppercased) to tuples as a relation."""
-        from string import upper
-        if self.indices.has_key(name):
-            raise NameError, "cannot set index"
-        self.rels[ upper(name) ] = relation
-        if self.verbose: print "db0 sets rel", name
-
-    def add_index(self, name, index):
-        if self.rels.has_key(name):
-            raise NameError, `name`+": is relation"
-        self.indices[name] = index
-        if self.verbose: print "db0 sets index", name
-
-    def drop_index(self, name):
-        if self.verbose: print "db0 drops index", name
-        del self.indices[name]
-
-    def __getitem__(self, name):
-        if self.verbose: print "db0 gets rel", name
-        from string import upper
-        return self.rels[upper(name)]
-
-    def get_for_update(self, name):
-        """note: does not imply updates, just possibility of them"""
-        verbose = self.verbose
-        if verbose: print "db0 gets rel for update", name
-        shadowing = self.shadowing
-        gotit = 0
-        from string import upper
-        name = upper(name)
-        rels = self.rels
-        if shadowing:
-            if rels.is_shadowed(name):
-                test = rels[name]
-                # do we really have a shadow or a db copy?
-                if test.is_shadow:
-                    gotit = 1
-            if not gotit:
-                if shadowing.has_relation(name):
-                    test = shadowing.get_for_update(name)
-                else:
-                    # uncommitted whole relation
-                    test = rels[name]
-                    gotit = 1
-        else:
-            test = rels[name]
-            gotit = 1
-        if self.readonly:
-            raise ValueError, "cannot update, db is read only"
-        elif test.is_view:
-            raise ValueError, "VIEW %s cannot be updated" % name
-        elif shadowing and not gotit:
-            if verbose: print "db0: making shadow for", name
-            if test.is_shadow: return test
-            shadow = Relation0(())
-            shadow = shadow.shadow(test, self.log, name, self)
-            rels[name] = shadow
-            return shadow
-        else:
-            return test
-
-    def __delitem__(self, name):
-        if self.verbose: print "db0 drops rel", name
-        from string import upper
-        del self.rels[upper(name)]
-
-    def relations(self):
-        return self.rels.keys()
-
-    def has_relation(self, name):
-        return self.rels.has_key(name)
-
-    def getdatadefs(self):
-        result = self.datadefs.values()
-        # sort to make create tables first, eg
-        result.sort()
-        return result
-
-    def add_datadef(self, name, defn, logit=1):
-        """only log the datadef if logit is set, else ignore redefinitions"""
-        dd = self.datadefs
-        if logit and dd.has_key(name):
-            raise KeyError, `name`+": already defined"
-        if logit:
-            self.touched = 1
-        dd[name] = defn
-
-    def has_datadef(self, name):
-        return self.datadefs.has_key(name)
-
-    def drop_datadef(self, name):
-        if self.verbose: print "db0 drops datadef",name
-        dd = self.datadefs
-        #print dd.keys()
-        if not dd.has_key(name):
-            raise KeyError, `name`+": no such element"
-        del dd[name]
-
-    def __repr__(self):
-        l = []
-        from string import join
-        l.append("INDICES: "+`self.indices.keys()`)
-        for (name, ddef) in self.datadefs.items():
-            l.append("data definition %s::\n%s" % (name, ddef))
-        for (name, rel) in self.rels.items():
-            l.append(name + ":")
-            l.append(rel.irepr())
-        return join(l, "\n\n")
-
-    def bindings(self, fromlist):
-        """return (attdict, reldict, amb, ambatts) from fromlist = [(name,alias)...]
-           where reldict: alias > tuplelist
-                 attdict: attribute_name > unique_relation
-                 amb: dict of dottedname > (rel, att)
-                 ambatts: dict of ambiguous_name > witness_alias
-        """
-        from string import upper
-        rels = self.rels
-        ambiguous_atts = {}
-        ambiguous = {}
-        relseen = {}
-        attbindings = {}
-        relbindings = {}
-        for (name,alias) in fromlist:
-            name = upper(name)
-            alias = upper(alias)
-            if relseen.has_key(alias):
-                raise NameError, `alias` + ": bound twice in from list"
-            relseen[alias]=alias
-            try:
-                therel = rels[name]
-            except KeyError:
-                raise NameError, `name` + " no such relation in DB"
-            relbindings[alias] = therel
-            for attname in therel.attributes():
-                if not ambiguous_atts.has_key(attname):
-                    if attbindings.has_key(attname):
-                        oldrel = attbindings[attname]
-                        oldbind = (oldrel, attname)
-                        ambiguous[ "%s.%s" % oldbind] = oldbind
-                        del attbindings[attname]
-                        ambiguous_atts[attname]=alias
-                        newbind = (alias, attname)
-                        ambiguous[ "%s.%s" % newbind ] = newbind
-                    else:
-                        attbindings[attname] = alias
-                else:
-                    newbind = (alias, attname)
-                    ambiguous[ "%s.%s" % newbind ] = newbind
-        return (attbindings, relbindings, ambiguous, ambiguous_atts)
-
-class File_Storage0:
-    """quick and dirty file storage mechanism.
-         relation names in directory/dbname.gfd
-           contains a white separated list of relation names
-         relations in directory/relname.grl
-           contains sequence of marshalled tuples reps
-           prefixed by marshalled list of atts
-    """
-
-    verbose = verbosity
-
-    def __init__(self, dbname, directory):
-        """directory must exist."""
-        if self.verbose: print "fs0 init:", dbname, directory
-        self.dbname = dbname
-        self.directory = directory
-        self.relation_implementation = Relation0
-        self.recovery_mode = 0
-
-    def load(self, parser=None, forscratch=0):
-        # if logfile is present, need to recover
-        # error condition: fail to load relation, ddf, but no log file!
-        logfile = self.logfilename()
-        blogfile = self.backup_logfilename()
-        verbose = self.verbose
-        if verbose: print "fs0 load, checking", logfile
-        try:
-            testlog = open(logfile, "rb")
-            if verbose: print "fs0: opened", testlog
-            testlog.close()
-            testlog = open(blogfile, "rb")
-            testlog.close()
-            testlog = None
-        except:
-            recovery_mode = self.recovery_mode = 0
-            if verbose: print "recovery not needed"
-        else:
-            recovery_mode = self.recovery_mode = 1
-            if verbose: print "FS0 RECOVERY MODE LOAD!"
-        resultdb = Database0()
-        resultdb.is_scratch = forscratch
-        commands = self.get_initstatements()
-        #commands = parser.DoParse1(initstatements)
-        for command in commands:
-            if verbose: print "fs0 evals", command
-            command.relbind(resultdb)
-            command.eval()
-        for name in resultdb.relations():
-            if verbose: print "fs0 loads rel", name
-            rel = resultdb[name]
-            if rel.is_view:
-                # don't need to load views
-                continue
-            rel.set_empty()
-            try:
-                data = self.get_relation(name)
-            except StorageError, detail:
-                raise StorageError, "load failure %s: %s" % (name, detail)
-            attsin = tuple(data.attributes())
-            attsout = tuple(rel.attributes())
-            if attsin!=attsout:
-                raise StorageError, "rel %s: atts %s don't match %s" % (
-                   name, attsin, attsout)
-            rel.add_tuples( data.rows() )
-            # in sync!
-            rel.touched = 0
-        # db in sync
-        resultdb.touched = 0
-        # do recovery, if needed
-        if recovery_mode:
-            if verbose: print "fs0 recovering from logfile", logfile
-            # restart the log file only if db is not scratch
-            restart = not forscratch
-            Log = DB_Logger(logfile, blogfile)
-            if verbose: Log.verbose=1
-            Log.recover(resultdb, restart)
-            # do a checkpoint
-            self.recovery_mode = 0
-            if restart and not forscratch:
-                Log.shutdown()
-                Log = None
-                del_file(logfile)
-                if verbose: print "FS0: dumping database"
-                self.dump(resultdb)
-                Log = resultdb.log = DB_Logger(logfile, blogfile)
-                Log.startup()
-        elif not forscratch:
-            Log = DB_Logger(logfile, blogfile)
-            Log.startup()
-            resultdb.log = Log
-        return resultdb
-
-    def relfilename(self, name):
-        #return "%s/%s.grl" % (self.directory, name)
-        return os.path.join(self.directory, name+".grl")
-
-    def backup_relfilename(self, name):
-        #return "%s/%s.brl" % (self.directory, name)
-        return os.path.join(self.directory, name+".brl")
-
-    def relfile(self, name, mode="rb"):
-        if self.recovery_mode:
-            return self.getfile_fallback(
-         self.backup_relfilename(name), self.relfilename(name), mode)
-        else:
-            name = self.relfilename(name)
-            return open(name, mode)
-
-    def getfile_fallback(self, first, second, mode):
-        try:
-            return open(first, mode)
-        except:
-            return open(second, mode)
-
-    def get_relation(self, name):
-        f = self.relfile(name, "rb")
-        rel = self.relation_implementation(())
-        try:
-            rel.load(f)
-        except StorageError:
-            if self.recovery_mode:
-                f = open(self.relfilename(name), "rb")
-                rel.load(f)
-            else:
-                raise StorageError, \
-   "fs: could not unpack backup rel file or rel file in recovery mode: "+name
-        return rel
-
-    def dbfilename(self):
-        #return "%s/%s.gfd" % (self.directory, self.dbname)
-        return os.path.join(self.directory, self.dbname+".gfd")
-
-    def backup_dbfilename(self):
-        #return "%s/%s.bfd" % (self.directory, self.dbname)
-        return os.path.join(self.directory, self.dbname+".bfd")
-
-    def logfilename(self):
-        #return "%s/%s.gfl" % (self.directory, self.dbname)
-        return os.path.join(self.directory, self.dbname+".gfl")
-
-    def backup_logfilename(self):
-        #return "%s/%s.glb" % (self.directory, self.dbname)
-        return os.path.join(self.directory, self.dbname+".glb")
-
-    def get_initstat_file(self, mode):
-        if self.recovery_mode:
-            return self.getfile_fallback(
-             self.backup_dbfilename(), self.dbfilename(), mode)
-        else:
-            return open(self.dbfilename(), mode)
-
-    def get_initstatements(self):
-        f = self.get_initstat_file("rb")
-        if self.verbose:
-            print "init statement from file", f
-        try:
-            data = checksum_undump(f)
-        except StorageError:
-            if self.recovery_mode:
-                f = open(self.dbfilename, "rb")
-                data = checksum_undump(f)
-            else:
-                raise StorageError, \
-   "could not unpack ddf backup or ddf file in recovery mode: "+self.dbname
-        f.close()
-        from sqlsem import deserialize
-        stats = map(deserialize, data)
-        return stats
-
-    def dump(self, db):
-        """perform a checkpoint (no active transactions!)"""
-        # db should be non-shadowing db
-        # first thing: back up the log
-        backup_file(self.logfilename(), self.backup_logfilename())
-        verbose = self.verbose
-        if verbose: print "fs0: checkpointing db"
-        if db.is_scratch or db.readonly:
-            # don't need to do anything.
-            if verbose: print "fs0: scratch or readonly, returning"
-            return
-        log = db.log
-        if log:
-            log.commit()
-            if verbose:
-                print "DEBUG LOG TRACE"
-                log.dump()
-            log.shutdown()
-        if db.touched:
-            if verbose: print "fs0: db touched, backing up ddf file"
-            backup_file(self.dbfilename(),
-                        self.backup_dbfilename())
-        relations = db.relations()
-        for r in relations:
-            rel = db[r]
-            #print r
-            if rel.touched:
-                if verbose: print "fs0: backing up touched rel", r
-                backup_file(self.relfilename(r),
-                            self.backup_relfilename(r))
-        for r in relations:
-            if verbose: print "fs0: dumping relations now"
-            self.dumprelation(r, db[r])
-        if verbose: print "fs0: dumping datadefs now"
-        self.dumpdatadefs(db)
-        # del of logfile signals successful commit.
-        if verbose: print "fs0: successful dump, deleting log file"
-        logfilename = self.logfilename()
-        blogfilename = self.backup_logfilename()
-        del_file(logfilename)
-        del_file(blogfilename)
-        if db.touched:
-            if verbose: print "fs0: deleting backup ddf file"
-            del_file(self.backup_dbfilename())
-            db.touched = 0
-        for r in relations:
-            rel = db[r]
-            if rel.touched:
-                if verbose: print "fs0: deleting rel backup", r
-                del_file(self.backup_relfilename(r))
-            rel.touched = 0
-        if verbose: print "fs0: restarting db log"
-        log = db.log = DB_Logger(logfilename, blogfilename)
-        log.startup()
-        if verbose: print "fs0: dump complete"
-        self.recovery_mode = 0
-
-    def dumprelation(self, name, rel, force=0):
-        """set force to ignore the "touch" flag."""
-        # ignore self.backup_mode
-        if (force or rel.touched) and not rel.is_view:
-            fn = self.relfilename(name)
-            if self.verbose:
-                print "dumping touched rel", name, "to", fn
-            f = open(fn, "wb")
-            rel.dump(f)
-
-    def dumpdatadefs(self, db, force=0):
-        """set force to ignore the touch flag"""
-        # ignore self.backup_mode
-        if not (force or db.touched): return
-        #from marshal import dump, dumps
-        fn = self.dbfilename()
-        f = open(fn, "wb")
-        datadefs = db.getdatadefs()
-        from sqlsem import serialize
-        datadefsd = map(serialize, datadefs)
-        #for (defn, ser) in map(None, datadefs, datadefsd):
-            #print defn
-            #print ser
-            #dumps(ser)  ### debug test
-        checksum_dump(datadefsd, f)
-        f.close()
-
-class Relation0:
-    """quick and dirty in core relation representation.
-         self.tuples contains tuples or 0 if erased.
-       tuples must not move (to preserve indices)
-       unless indices regenerate.
-    """
-
-    is_view = 0 # Relation0 is not a view
-
-    def __init__(self, attribute_names, tuples=None, filter=None):
-        from sqlsem import kjbuckets
-        self.indices = kjbuckets.kjGraph()
-        self.index_list = []
-        self.attribute_names = attribute_names
-        if tuples is None:
-            tuples = []
-        self.filter = filter
-        self.set_empty()
-        self.add_tuples(tuples)
-        # indices map attname > indices containing att
-        # relation to shadow and log (if non-null)
-        self.log = None
-        self.name = None # anonymous by default
-        self.is_shadow = 0
-        self.touched = 0
-
-    def shadow(self, otherrelation, log, name, inshadowdb):
-        """return structural replica of otherrelation (as self)
-
-           for non-updatable relation (eg, view) may return otherrelation"""
-        if otherrelation.is_view:
-            # for now, assume VIEWS CANNOT BE UPDATED
-            return otherrelation
-        self.is_shadow = 1
-        self.shadow_of_shadow = otherrelation.is_shadow
-        self.log = log
-        self.name = name
-        # don't make any updates permanent if set.
-        self.tuples = otherrelation.tuples[:]
-        self.attribute_names = otherrelation.attribute_names
-        self.filter = otherrelation.filter
-        for index in otherrelation.index_list:
-            copy = index.copy()
-            name = copy.name
-            self.add_index(copy, recordtuples=0)
-            # record in shadowdb, but don't log it
-            inshadowdb.add_index(name, copy)
-            #inshadowdb.add_datadef(name, copy, logit=0)
-        self.touched = otherrelation.touched
-        return self
-
-    def unshadow(self):
-        """make self into a replacement for shadowed, return self."""
-        if self.is_shadow:
-            self.log = None
-            self.is_shadow = self.shadow_of_shadow
-        return self
-
-    def dump(self, file):
-        attributes = tuple(self.attributes())
-        rows = self.rows()
-        newrows = rows[:]
-        count = 0
-        tt = type
-        from types import IntType
-        for i in xrange(len(rows)):
-            this = rows[i]
-            if this is not None and tt(this) is not IntType:
-                newrows[count] = rows[i].dump(attributes)
-                count = count + 1
-        newrows = newrows[:count]
-        newrows.append(attributes)
-        checksum_dump(newrows, file)
-
-    def load(self, file):
-        """checksum must succeed."""
-        rows = checksum_undump(file)
-        attributes = rows[-1]
-        self.attribute_names = attributes
-        rows = rows[:-1]
-        from sqlsem import kjbuckets
-        undump = kjbuckets.kjUndump
-        for i in xrange(len(rows)):
-            rows[i] = undump(attributes, rows[i])
-        self.set_empty()
-        self.add_tuples(rows)
-        # in sync with disk copy!
-        self.touched = 0
-
-    def add_index(self, index, recordtuples=1):
-        """unset recordtuples if the index is initialized already."""
-        # does not "touch" the relation
-        index_list = self.index_list
-        indices = self.indices
-        atts = index.attributes()
-        for a in atts:
-            indices[a] = index
-        if recordtuples:
-            (tuples, seqnums) = self.rows(1)
-            index.clear()
-            if tuples:
-                index.add_tuples(tuples, seqnums)
-        index_list.append(index)
-
-    def drop_index(self, index):
-        # does not "touch" the relation
-        name = index.name
-        if verbosity:
-            print "rel.drop_index", index
-            print "...", self.indices, self.index_list
-        indices = self.indices
-        for a in index.attributes():
-            # contorted since one index be clone of the other.
-            aindices = indices.neighbors(a)
-            for ind in aindices:
-                if ind.name == name:
-                    indices.delete_arc(a, ind)
-                    theind = ind
-        # the (non-clone) index ought to have been found above...
-        self.index_list.remove(theind)
-
-    def choose_index(self, attributes):
-        """choose an index including subset of attributes or None"""
-        from sqlsem import kjbuckets
-        kjSet = kjbuckets.kjSet
-        atts = kjSet(attributes)
-        #print "choosing index", atts
-        indices = (atts * self.indices).values()
-        choice = None
-        for index in indices:
-            indexatts = index.attributes()
-            #print "index atts", indexatts
-            iatts = kjSet(indexatts)
-            if iatts.subset(atts):
-                if choice is None:
-                    #print "chosen", index.name
-                    choice = index
-                    lchoice = len(choice.attributes())
-                else:
-                    if index.unique or lchoice<len(indexatts):
-                        choice = index
-                        lchoice = len(choice.attributes())
-        return choice
-
-    def __repr__(self):
-        rows = self.rows()
-        atts = self.attributes()
-        list_rep = [list(atts)]
-        for r in rows:
-            rlist = []
-            for a in atts:
-                try:
-                    elt = r[a]
-                except KeyError:
-                    elt = "NULL"
-                else:
-                    elt = str(elt)
-                rlist.append(elt)
-            list_rep.append(rlist)
-        # compute maxen for formatting
-        maxen = [0] * len(atts)
-        for i in xrange(len(atts)):
-            for l in list_rep:
-                maxen[i] = max(maxen[i], len(l[i]))
-        for i in xrange(len(atts)):
-            mm = maxen[i]
-            for l in list_rep:
-                old = l[i]
-                l[i] = old + (" " * (mm-len(old)))
-        from string import join
-        for i in xrange(len(list_rep)):
-            list_rep[i] = join(list_rep[i], " | ")
-        first = list_rep[0]
-        list_rep.insert(1, "=" * len(first))
-        return join(list_rep, "\n")
-
-    def irepr(self):
-        List = [self] + list(self.index_list)
-        List = map(str, List)
-        from string import join
-        return join(List, "\n")
-
-    def set_empty(self):
-        self.tuples = []
-        for index in self.index_list:
-            index.clear()
-
-    def drop_indices(self, db):
-        for index in self.index_list:
-            name = index.name
-            db.drop_datadef(name)
-            db.drop_index(name)
-        self.index_list = []
-        from sqlsem import kjbuckets
-        self.indices = kjbuckets.kjGraph()
-
-    def regenerate_indices(self):
-        (tuples, seqnums) = self.rows(1)
-        #self.tuples = tuples
-        for index in self.index_list:
-            index.clear()
-            index.add_tuples(tuples, seqnums)
-
-    def add_tuples(self, tuples):
-        if not tuples: return
-        tuples = filter(self.filter, tuples)
-        oldtuples = self.tuples
-        first = len(oldtuples)
-        oldtuples[first:] = list(tuples)
-        last = len(oldtuples)
-        for index in self.index_list:
-            index.add_tuples(tuples, xrange(first,last))
-        self.touched = 1
-
-    def attributes(self):
-        return self.attribute_names
-
-    def rows(self, andseqnums=0):
-        tups = self.tuples
-        # short cut
-        if 0 not in tups:
-            if andseqnums:
-                return (tups, xrange(len(tups)))
-            else:
-                return tups
-        tt = type
-        from types import IntType
-        result = list(self.tuples)
-        if andseqnums: seqnums = result[:]
-        count = 0
-        for i in xrange(len(result)):
-            t = result[i]
-            if tt(t) is not IntType:
-                result[count] = t
-                if andseqnums: seqnums[count] = i
-                count = count+1
-        result = result[:count]
-        if andseqnums:
-            return (result, seqnums[:count])
-        else:
-            return result
-
-    def erase_tuples(self, seqnums):
-        #print "et seqnums", seqnums
-        if not seqnums: return
-        tups = self.tuples
-        # order important! indices first!
-        for index in self.index_list:
-            index.erase_tuples(seqnums, tups)
-        for i in seqnums:
-            #print "deleting", i
-            tups[i] = 0
-        #print self
-        self.touched = 1
-
-    def reset_tuples(self, tups, seqnums):
-        # KISS for indices, maybe optimize someday...
-        if not tups: return
-        mytups = self.tuples
-        for index in self.index_list:
-            index.erase_tuples(seqnums, mytups)
-        for i in xrange(len(seqnums)):
-            seqnum = seqnums[i]
-            mytups[seqnum] = tups[i]
-        for index in self.index_list:
-            index.add_tuples(tups, seqnums)
-        self.touched = 1
-
-# should views be here?
-
-class View(Relation0):
-    """view object, acts like relation, with addl operations."""
-    touched = 0
-    is_view = 1
-    is_shadow = 0
-
-    ### must fix namelist!
-
-    def __init__(self, name, namelist, selection, indb):
-        """set namelist to None for implicit namelist"""
-        self.name = name
-        self.namelist = namelist
-        self.selection = selection
-        # attempt a relbind, no outer bindings!
-        self.relbind(indb, {})
-        self.cached_rows = None
-        self.translate = None
-
-    def __repr__(self):
-        return "view %s as %s" % (self.name, self.selection)
-
-    irepr = __repr__
-
-    def uncache(self):
-        self.cached_rows = None
-
-    def UNDEFINED_OP_FOR_VIEW(*args, **kw):
-        raise ValueError, "operation explicitly undefined for view object"
-
-    shadow = dump = load = add_index = drop_index = set_empty = \
-    add_tuples = erase_tuples = reset_tuples = UNDEFINED_OP_FOR_VIEW
-
-    def ignore_op_for_view(*args, **kw):
-        """ignore this op when applied to view"""
-        pass
-
-    drop_indices = regenerate_indices = ignore_op_for_view
-
-    def choose_index(s, a):
-        """no indices on views (might change this?)"""
-        return None
-
-    def relbind(self, db, atts):
-        """bind self to db, ignore atts"""
-        name = self.name
-        selection = self.selection
-        selection = self.selection = selection.relbind(db)
-        namelist = self.namelist
-        if namelist is not None:
-            from sqlsem import kjbuckets
-            target_atts = selection.attributes()
-            if len(namelist)!=len(target_atts):
-                raise "select list and namelist don't match in %s"%name
-            pairs = map(None, namelist, target_atts)
-            self.translate = kjbuckets.kjGraph(pairs)
-        return self
-
-    def attributes(self):
-        namelist = self.namelist
-        if self.namelist is None:
-            return self.selection.attributes()
-        return namelist
-
-    def rows(self, andseqs=0):
-        cached_rows = self.cached_rows
-        if cached_rows is None:
-            cached_rows = self.cached_rows = self.selection.eval().rows()
-            if self.namelist is not None:
-                # translate the attribute names
-                translate = self.translate
-                for i in range(len(cached_rows)):
-                    cached_rows[i] = cached_rows[i].remap(translate)
-        if andseqs:
-            return (cached_rows[:], range(len(cached_rows)))
-        else:
-            return cached_rows[:]
-
-class Index:
-    """Index for tuples in relation.  Tightly bound to relation rep."""
-
-    ### should add "unique index" and check enforce uniqueness...
-
-    def __init__(self, name, attributes, unique=0):
-        self.unique = unique
-        self.name = name
-        self.atts = tuple(attributes)
-        # values > tuples
-        self.index = {}
-        self.dseqnums = {}
-
-    def __repr__(self):
-        un = ""
-        if self.unique: un="UNIQUE "
-        return "%sindex %s on %s" % (un, self.name, self.atts)
-
-    def copy(self):
-        """make a fast structural copy of self"""
-        result = Index(self.name, self.atts, unique=self.unique)
-        rindex = result.index
-        rdseqnums = result.dseqnums
-        myindex = self.index
-        mydseqnums = self.dseqnums
-        for k in myindex.keys():
-            rindex[k] = myindex[k][:]
-        for k in mydseqnums.keys():
-            rdseqnums[k] = mydseqnums[k][:]
-        return result
-
-    def attributes(self):
-        return self.atts
-
-    def matches(self, tuple, translate=None):
-        """return (tuples, seqnums) for tuples matching tuple
-           (with possible translations"""
-        if translate:
-            tuple = translate * tuple
-        atts = self.atts
-        dump = tuple.dump(atts)
-        index = self.index
-        if index.has_key(dump):
-            return (index[dump], self.dseqnums[dump])
-        else:
-            return ((), ())
-
-    def clear(self):
-        self.index = {}
-        self.dseqnums = {}
-
-    def add_tuples(self, tuples, seqnums):
-        unique = self.unique
-        atts = self.atts
-        index = self.index
-        dseqnums = self.dseqnums
-        test = index.has_key
-        for i in xrange(len(tuples)):
-            tup = tuples[i]
-            seqnum = seqnums[i]
-            dump = tup.dump(atts)
-            #print self.name, dump
-            if test(dump):
-                bucket = index[dump]
-                #print "self", self
-                #print "unique", unique
-                #print "bucket", bucket
-                if unique and bucket:
-                    raise StorageError, "uniqueness violation: %s %s" %(
-                      dump, self)
-                bucket.append(tup)
-                dseqnums[dump].append(seqnum)
-            else:
-                index[dump] = [tup]
-                dseqnums[dump] = [seqnum]
-
-    def erase_tuples(self, seqnums, all_tuples):
-        # all_tuples must be internal rel tuple list
-        atts = self.atts
-        index = self.index
-        dseqnums = self.dseqnums
-        for seqnum in seqnums:
-            tup = all_tuples[seqnum]
-            dump = tup.dump(atts)
-            index[dump].remove(tup)
-            dseqnums[dump].remove(seqnum)
-
-class shadow_dict:
-    """shadow dictionary. defer & remember updates."""
-    verbose = verbosity
-    def __init__(self, shadowing, value_transform=None):
-        self.shadowed = shadowing
-        shadow = self.shadow = {}
-        self.touched = {}
-        for key in shadowing.keys():
-            shadow[key] = shadowing[key]
-        self.value_transform = value_transform
-        # defeats inheritance! careful!
-        self.values = shadow.values
-        self.items = shadow.items
-        self.keys = shadow.keys
-        self.has_key = shadow.has_key
-
-    def is_shadowed(self, name):
-        return self.touched.has_key(name)
-
-    def __len__(self):
-        return len(self.shadow)
-
-    def commit(self, verbose=0):
-        """apply updates to shadowed."""
-        import sys
-        verbose = verbose or self.verbose
-        if self.touched:
-            shadowed = self.shadowed
-            shadow = self.shadow
-            value_transform = self.value_transform
-            keys = shadowed.keys()
-            if verbose:
-                print "shadowdict oldkeys", keys
-            for k in keys:
-                del shadowed[k]
-            keys = shadow.keys()
-            if verbose:
-                print "shadowdict newkeys", keys
-            for k in shadow.keys():
-                value = shadow[k]
-                if value_transform is not None:
-                    try:
-                        value = value_transform(value)
-                    except:
-                        raise "transform fails", (sys.exc_type, sys.exc_value, k, value)
-                shadowed[k] = value
-            self.touched = {}
-
-    def __getitem__(self, key):
-        return self.shadow[key]
-
-    def __setitem__(self, key, item):
-        from types import StringType
-        if type(key) is not StringType:
-            raise "nonstring", key
-        if item is None:
-            raise "none set", (key, item)
-        self.touched[key] = 1
-        self.shadow[key] = item
-
-    def __delitem__(self, key):
-        self.touched[key] = 1
-        del self.shadow[key]
-
-# stored mutations on relations
-class Add_Tuples:
-    """stored rel.add_tuples(tuples)"""
-    def __init__(self, name):
-        self.to_rel = name
-        self.indb = None
-    def initargs(self):
-        return (self.to_rel,)
-    def set_data(self, tuples, rel):
-        """store self.data as tuple with tuple[-1] as to_rel, rest data"""
-        attributes = tuple(rel.attributes())
-        ltuples = len(tuples)
-        data = list(tuples)
-        for i in xrange(ltuples):
-            tdata = tuples[i].dump(attributes)
-            data[i] = tdata
-        self.data = tuple(data)
-    def __repr__(self):
-        from string import join
-        datarep = map(repr, self.data)
-        datarep = join(datarep, "\n  ")
-        return "add tuples to %s\n  %s\n\n" % (self.to_rel, datarep)
-    def marshaldata(self):
-        return self.data
-    def demarshal(self, data):
-        self.data = data
-    def relbind(self, db):
-        self.indb = db
-    def eval(self, dyn=None):
-        """apply operation to db"""
-        db = self.indb
-        data = self.data
-        name = self.to_rel
-        rel = db[name]
-        attributes = tuple(rel.attributes())
-        tuples = list(data)
-        from sqlsem import kjbuckets
-        undump = kjbuckets.kjUndump
-        for i in xrange(len(tuples)):
-            tuples[i] = undump(attributes, tuples[i])
-        rel.add_tuples(tuples)
-
-class Erase_Tuples(Add_Tuples):
-    """stored rel.erase_tuples(seqnums)"""
-    def set_data(self, seqnums, rel):
-        seqnums = list(seqnums)
-        self.data = tuple(seqnums)
-    def __repr__(self):
-        return "Erase seqnums in %s\n  %s\n\n" % (self.to_rel, self.data)
-    def eval(self, dyn=None):
-        db = self.indb
-        seqnums = self.data
-        name = self.to_rel
-        rel = db[name]
-        rel.erase_tuples(seqnums)
-
-class Reset_Tuples(Add_Tuples):
-    """stored rel.reset_tuples(tups, seqnums)"""
-    def set_data(self, tups, seqnums, rel):
-        attributes = tuple(rel.attributes())
-        dtups = list(tups)
-        for i in xrange(len(dtups)):
-            dtups[i] = dtups[i].dump(attributes)
-        self.data = (tuple(dtups), tuple(seqnums))
-    def __repr__(self):
-        (dtups, seqnums) = self.data
-        pairs = map(None, seqnums, dtups)
-        from string import join
-        datarep = map(repr, pairs)
-        datarep = join(datarep, "  \n")
-        return "Reset tuples in %s\n  %s\n\n" % (self.to_rel, datarep)
-    def eval(self, dyn=None):
-        db = self.indb
-        (dtups, seqnums) = self.data
-        tups = list(dtups)
-        rel = db[self.to_rel]
-        attributes = tuple(rel.attributes())
-        from sqlsem import kjbuckets
-        undump = kjbuckets.kjUndump
-        for i in xrange(len(dtups)):
-            tups[i] = undump(attributes, dtups[i])
-        rel.reset_tuples(tups, seqnums)
-
-# Log entry tags
-START = "START"
-COMMIT = "COMMIT"
-ABORT = "ABORT"
-UNREADABLE = "UNREADABLE"
-
-class Transaction_Logger:
-    """quick and dirty Log implementation per transaction."""
-    verbose = verbosity
-
-    def __init__(self, db_log, transactionid, is_scratch=0):
-        self.db_log = db_log
-        self.transactionid = transactionid
-        # ignore all operations if set
-        self.is_scratch = is_scratch
-        self.dirty = 0
-        self.deferred = []
-
-    def reset(self):
-        self.deferred = []
-
-    def __repr__(self):
-        return "Transaction_Logger(%s, %s, %s)" % (
-           self.db_log, self.transactionid, self.is_scratch)
-
-    def log(self, operation):
-        verbose = self.verbose
-        tid = self.transactionid
-        if not self.is_scratch:
-            self.deferred.append(operation)
-            if verbose:
-                print "tid logs", tid, operation
-
-    def flush(self):
-        verbose = self.verbose
-        if not self.is_scratch:
-            tid = self.transactionid
-            deferred = self.deferred
-            self.deferred = []
-            db_log = self.db_log
-            if db_log:
-                for operation in deferred:
-                    db_log.log(operation, tid)
-            self.dirty = 1
-        elif verbose:
-            print "scratch log ignored", tid, operation
-
-    def commit(self, verbose=0):
-        verbose = self.verbose or verbose
-        tid = self.transactionid
-        if verbose: print "committing trans log", tid
-        if self.is_scratch:
-            if verbose:
-                print "scratch commit ignored", tid
-            return
-        if not self.dirty:
-            if verbose:
-                print "nondirty commit", tid
-            return
-        self.flush()
-        db_log = self.db_log
-        db_log.commit(verbose, tid)
-        if verbose:
-            print "transaction is considered recoverable", tid
-
-class DB_Logger:
-    """quick and dirty global db logger."""
-    verbose = verbosity
-    is_scratch = 0
-
-    def __init__(self, filename, backupname):
-        self.filename = filename
-        # backup name is never kept open: existence indicates log in use.
-        self.backupname = backupname
-        self.file = None
-        self.dirty = 0
-        if self.verbose:
-            print id(self), "created DB_Logger on", self.filename
-
-    def __repr__(self):
-        return "DB_Logger(%s)" % self.filename
-
-    def startup(self):
-        if self.verbose:
-            print id(self), "preparing", self.filename
-        # open happens automagically
-        #self.file = open(self.filename, "wb")
-        self.clear()
-        self.dirty = 0
-
-    def shutdown(self):
-        if self.verbose:
-            print id(self), "shutting down log", self.filename
-        file = self.file
-        if file:
-            file.close()
-        self.file = None
-
-    def clear(self):
-        if self.verbose:
-            print id(self), "clearing"
-        self.shutdown()
-        del_file(self.filename)
-
-    def restart(self):
-        if self.verbose:
-            print id(self), "restarting log file", self.filename
-        if self.file is not None:
-            self.file.close()
-        self.file = open(self.filename, "ab")
-        dummy = open(self.backupname, "ab")
-        dummy.close()
-        self.dirty = 0
-
-    def clear_log_file(self):
-        if self.verbose:
-            print id(self), "clearing logfile", self.filename
-        if self.file is not None:
-            self.file.close()
-            self.file = None
-        del_file(self.filename)
-        del_file(self.backupname)
-        self.dirty = 0
-
-    def log(self, operation, transactionid=None):
-        """transactionid of None means no transaction: immediate."""
-        file = self.file
-        if file is None:
-            self.restart()
-            file = self.file
-        verbose = self.verbose
-        from sqlsem import serialize
-        serial = serialize(operation)
-        data = (transactionid, serial)
-        if verbose:
-            print id(self), "logging:", transactionid
-            print operation
-        checksum_dump(data, file)
-        self.dirty = 1
-
-    def commit(self, verbose=0, transactionid=None):
-        """add commit, if appropriate, flush."""
-        verbose = self.verbose or verbose
-        if not self.dirty and transactionid is None:
-            if verbose: print "commit not needed", transactionid
-            return
-        elif verbose:
-            print "attempting commit", transactionid
-        if transactionid is not None:
-            self.log( COMMIT, transactionid )
-            if verbose: print "committed", transactionid
-        if verbose: print "flushing", self.filename
-        self.file.flush()
-        self.dirty = 0
-
-    def recover(self, db, restart=1):
-        import sys
-        verbose = self.verbose
-        filename = self.filename
-        if verbose:
-            print "attempting recovery from", self.filename
-        file = self.file
-        if file is not None:
-            if verbose: print "closing file"
-            self.file.close()
-            self.file = None
-        if verbose:
-            print "opens should generate an error if no recovery needed"
-        try:
-            file = open(filename, "rb")
-            file2 = open(self.backupname, "rb")
-        except:
-            if verbose:
-                print "no recovery needed:", filename
-                print sys.exc_type, sys.exc_value
-            sys.exc_traceback = None
-            return
-        file2.close()
-        if verbose: print "log found, recovering from", filename
-        records = self.read_records(file)
-        if verbose: print "scan for commit records"
-        commits = {}
-        for (i, (tid, op)) in records:
-            if op==COMMIT:
-                if verbose: print "transaction", tid, "commit at", i
-                commits[tid] = i
-            elif verbose:
-                print i, tid, "operation\n", op
-        if verbose: print commits, "commits total"
-        if verbose: print "applying commited operations, in order"
-        committed = commits.has_key
-        from types import StringType
-        for (i, (tid, op)) in records:
-            if tid is None or (committed(tid) and commits[tid]>i):
-                if type(op) is StringType:
-                    if verbose:
-                        print "skipping marker", tid, op
-                if verbose:
-                    print "executing for", tid, i
-                    print op
-                #### Note: silently eat errors unless verbose
-                ### (eg in case of table recreation...)
-                ### There should be a better way to do this!!!
-                import sys
-                try:
-                    op.relbind(db)
-                    op.eval()
-                except:
-                    if verbose:
-                        print "error", sys.exc_type, sys.exc_value
-                        print "binding or evaluating logged operation:"
-                        print op
-            elif verbose:
-                print "uncommitted operation", tid, i
-                op
-        if verbose:
-            print "recovery successful: clearing log file"
-        self.clear()
-        if restart:
-            if verbose:
-                print "recreating empty log file"
-            self.startup()
-
-    def read_records(self, file):
-        """return log record as (index, (tid, op)) list"""
-        verbose = self.verbose
-        if verbose: print "reading log records to error"
-        import sys
-        records = {}
-        from sqlsem import deserialize
-        count = 0
-        while 1:
-            try:
-                data = checksum_undump(file)
-            except:
-                if verbose:
-                    print "record read terminated with error", len(records)
-                    print sys.exc_type, sys.exc_value
-                break
-            (transactionid, serial) = data
-            operation = deserialize(serial)
-            records[count] = (transactionid, operation)
-            if verbose:
-                print count, ": read for", transactionid
-                print operation
-            count = count+1
-        if verbose: print len(records), "records total"
-        records = records.items()
-        records.sort()
-        return records
-
-    def dump(self):
-        verbose = self.verbose
-        self.shutdown()
-        print "dumping log"
-        self.verbose = 1
-        try:
-            file = open(self.filename, "rb")
-        except:
-            print "DUMP FAILED, cannot open", self.filename
-        else:
-            self.read_records(file)
-        self.verbose = verbose
-        self.restart()

Copied: zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py (from rev 91892, zope.rdb/trunk/src/zope/rdb/gadfly/gfdb0.py)
===================================================================
--- zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py	                        (rev 0)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/gfdb0.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -0,0 +1,1417 @@
+"""storage objects"""
+
+verbosity = 0
+
+import os
+
+try:
+    from hashlib import md5
+except ImportError:
+    # Python 2.4 and earlier
+    from md5 import md5
+
+# use whatever kjbuckets sqlsem is using
+#from sqlsem import kjbuckets, maketuple
+
+# error on checking of data integrity
+StorageError = "StorageError"
+
+# use md5 checksum (stub if md5 unavailable?)
+def checksum(string):
+    return md5(string).digest()
+
+def recursive_dump(data, prefix="["):
+    """for debugging"""
+    from types import StringType
+    if type(data) is StringType:
+        #print prefix, data
+        return
+    p2 = prefix+"["
+    try:
+        for x in data:
+            recursive_dump(x, p2)
+    except:
+        print prefix, data
+
+def checksum_dump(data, file):
+    """checksum and dump marshallable data to file"""
+    #print "checksum_dump", file
+    #recursive_dump(data)
+    from marshal import dumps, dump
+    #print "data\n",data
+    storage = dumps(data)
+    checkpair = (checksum(storage), storage)
+    dump(checkpair, file)
+
+def checksum_undump(file):
+    """undump marshallable data from file, checksum"""
+    from marshal import load, loads
+    checkpair = load(file)
+    (check, storage) = checkpair
+    if checksum(storage)!=check:
+        raise StorageError, "data load checksum fails"
+    data = loads(storage)
+    return data
+
+def backup_file(filename, backupname):
+    """backup file, if unopenable ignore"""
+    try:
+        f = open(filename, "rb")
+    except:
+        return
+    data = f.read()
+    f.close()
+    f = open(backupname, "wb")
+    f.write(data)
+    f.close()
+
+def del_file(filename):
+    """delete file, ignore errors"""
+    from os import unlink
+    try:
+        unlink(filename)
+    except:
+        pass
+
+class Database0:
+    """quick and dirty in core database representation."""
+
+    # db.log is not None == use db.log to log modifications
+
+    # set for verbose prints
+    verbose = verbosity
+
+    # set for read only copy
+    readonly = 0
+
+    # set for temp/scratch db copy semantics
+    is_scratch = 0
+
+    # set to add introspective tables
+    introspect = 1
+
+    def __init__(self, shadowing=None, log=None):
+        """dictionary of relations."""
+        verbose = self.verbose
+        self.shadowing = shadowing
+        self.log = log
+        self.touched = 0
+        if log:
+            self.is_scratch = log.is_scratch
+        if shadowing and not log:
+            raise ValueError, "shadowing db requires log"
+        if verbose:
+            print "Database0 init"
+            if log:
+                log.verbose = 1
+        if shadowing:
+            # shadow structures of shadowed db
+            self.rels = shadow_dict(shadowing.rels, Relation0.unshadow)
+            self.datadefs = shadow_dict(shadowing.datadefs)
+            self.indices = shadow_dict(shadowing.indices)
+        else:
+            self.rels = {}
+            self.datadefs = {}
+            self.indices = {}
+            if self.introspect:
+                self.set_introspection()
+
+    def set_introspection(self):
+        import gfintrospect
+        self["dual"] = gfintrospect.DualView()
+        self["__table_names__"] = gfintrospect.RelationsView()
+        self["__datadefs__"] = gfintrospect.DataDefsView()
+        self["__indices__"] = gfintrospect.IndicesView()
+        self["__columns__"] = gfintrospect.ColumnsView()
+        self["__indexcols__"] = gfintrospect.IndexAttsView()
+
+    def reshadow(self, db, dblog):
+        """(re)make self into shadow of db with dblog"""
+        self.shadowing = db
+        self.log = dblog
+        self.rels = shadow_dict(db.rels, Relation0.unshadow)
+        self.datadefs = shadow_dict(db.datadefs)
+        self.indices = shadow_dict(db.indices)
+
+    def clear(self):
+        """I'm not sure if database has circular structure, so this added"""
+        self.shadowing = None
+        self.log = None
+        self.rels = {}
+        self.datadefs = {}
+        self.indices = {}
+
+    def commit(self):
+        """commit shadowed changes"""
+        verbose = self.verbose
+        if self.shadowing and self.touched:
+            # log commit handled elsewhere
+            #log = self.log
+            #if log and not log.is_scratch:
+               #if verbose: print "committing log"
+               #self.log.commit(verbose)
+            if verbose: print "committing rels"
+            self.rels.commit(verbose)
+            if verbose: print "committing datadefs"
+            self.datadefs.commit(verbose)
+            if verbose: print "committing indices"
+            self.indices.commit(verbose)
+            st = self.shadowing.touched
+            if not st:
+                if verbose: "print setting touched", self.touched
+                self.shadowing.touched = self.touched
+            elif verbose:
+                print "shadowed database is touched"
+        elif verbose:
+            print "db0: commit on nonshadow instance"
+
+    def __setitem__(self, name, relation):
+        """bind a name (uppercased) to tuples as a relation."""
+        from string import upper
+        if self.indices.has_key(name):
+            raise NameError, "cannot set index"
+        self.rels[ upper(name) ] = relation
+        if self.verbose: print "db0 sets rel", name
+
+    def add_index(self, name, index):
+        if self.rels.has_key(name):
+            raise NameError, `name`+": is relation"
+        self.indices[name] = index
+        if self.verbose: print "db0 sets index", name
+
+    def drop_index(self, name):
+        if self.verbose: print "db0 drops index", name
+        del self.indices[name]
+
+    def __getitem__(self, name):
+        if self.verbose: print "db0 gets rel", name
+        from string import upper
+        return self.rels[upper(name)]
+
+    def get_for_update(self, name):
+        """note: does not imply updates, just possibility of them"""
+        verbose = self.verbose
+        if verbose: print "db0 gets rel for update", name
+        shadowing = self.shadowing
+        gotit = 0
+        from string import upper
+        name = upper(name)
+        rels = self.rels
+        if shadowing:
+            if rels.is_shadowed(name):
+                test = rels[name]
+                # do we really have a shadow or a db copy?
+                if test.is_shadow:
+                    gotit = 1
+            if not gotit:
+                if shadowing.has_relation(name):
+                    test = shadowing.get_for_update(name)
+                else:
+                    # uncommitted whole relation
+                    test = rels[name]
+                    gotit = 1
+        else:
+            test = rels[name]
+            gotit = 1
+        if self.readonly:
+            raise ValueError, "cannot update, db is read only"
+        elif test.is_view:
+            raise ValueError, "VIEW %s cannot be updated" % name
+        elif shadowing and not gotit:
+            if verbose: print "db0: making shadow for", name
+            if test.is_shadow: return test
+            shadow = Relation0(())
+            shadow = shadow.shadow(test, self.log, name, self)
+            rels[name] = shadow
+            return shadow
+        else:
+            return test
+
+    def __delitem__(self, name):
+        if self.verbose: print "db0 drops rel", name
+        from string import upper
+        del self.rels[upper(name)]
+
+    def relations(self):
+        return self.rels.keys()
+
+    def has_relation(self, name):
+        return self.rels.has_key(name)
+
+    def getdatadefs(self):
+        result = self.datadefs.values()
+        # sort to make create tables first, eg
+        result.sort()
+        return result
+
+    def add_datadef(self, name, defn, logit=1):
+        """only log the datadef if logit is set, else ignore redefinitions"""
+        dd = self.datadefs
+        if logit and dd.has_key(name):
+            raise KeyError, `name`+": already defined"
+        if logit:
+            self.touched = 1
+        dd[name] = defn
+
+    def has_datadef(self, name):
+        return self.datadefs.has_key(name)
+
+    def drop_datadef(self, name):
+        if self.verbose: print "db0 drops datadef",name
+        dd = self.datadefs
+        #print dd.keys()
+        if not dd.has_key(name):
+            raise KeyError, `name`+": no such element"
+        del dd[name]
+
+    def __repr__(self):
+        l = []
+        from string import join
+        l.append("INDICES: "+`self.indices.keys()`)
+        for (name, ddef) in self.datadefs.items():
+            l.append("data definition %s::\n%s" % (name, ddef))
+        for (name, rel) in self.rels.items():
+            l.append(name + ":")
+            l.append(rel.irepr())
+        return join(l, "\n\n")
+
+    def bindings(self, fromlist):
+        """return (attdict, reldict, amb, ambatts) from fromlist = [(name,alias)...]
+           where reldict: alias > tuplelist
+                 attdict: attribute_name > unique_relation
+                 amb: dict of dottedname > (rel, att)
+                 ambatts: dict of ambiguous_name > witness_alias
+        """
+        from string import upper
+        rels = self.rels
+        ambiguous_atts = {}
+        ambiguous = {}
+        relseen = {}
+        attbindings = {}
+        relbindings = {}
+        for (name,alias) in fromlist:
+            name = upper(name)
+            alias = upper(alias)
+            if relseen.has_key(alias):
+                raise NameError, `alias` + ": bound twice in from list"
+            relseen[alias]=alias
+            try:
+                therel = rels[name]
+            except KeyError:
+                raise NameError, `name` + " no such relation in DB"
+            relbindings[alias] = therel
+            for attname in therel.attributes():
+                if not ambiguous_atts.has_key(attname):
+                    if attbindings.has_key(attname):
+                        oldrel = attbindings[attname]
+                        oldbind = (oldrel, attname)
+                        ambiguous[ "%s.%s" % oldbind] = oldbind
+                        del attbindings[attname]
+                        ambiguous_atts[attname]=alias
+                        newbind = (alias, attname)
+                        ambiguous[ "%s.%s" % newbind ] = newbind
+                    else:
+                        attbindings[attname] = alias
+                else:
+                    newbind = (alias, attname)
+                    ambiguous[ "%s.%s" % newbind ] = newbind
+        return (attbindings, relbindings, ambiguous, ambiguous_atts)
+
+class File_Storage0:
+    """quick and dirty file storage mechanism.
+         relation names in directory/dbname.gfd
+           contains a white separated list of relation names
+         relations in directory/relname.grl
+           contains sequence of marshalled tuples reps
+           prefixed by marshalled list of atts
+    """
+
+    verbose = verbosity
+
+    def __init__(self, dbname, directory):
+        """directory must exist."""
+        if self.verbose: print "fs0 init:", dbname, directory
+        self.dbname = dbname
+        self.directory = directory
+        self.relation_implementation = Relation0
+        self.recovery_mode = 0
+
+    def load(self, parser=None, forscratch=0):
+        # if logfile is present, need to recover
+        # error condition: fail to load relation, ddf, but no log file!
+        logfile = self.logfilename()
+        blogfile = self.backup_logfilename()
+        verbose = self.verbose
+        if verbose: print "fs0 load, checking", logfile
+        try:
+            testlog = open(logfile, "rb")
+            if verbose: print "fs0: opened", testlog
+            testlog.close()
+            testlog = open(blogfile, "rb")
+            testlog.close()
+            testlog = None
+        except:
+            recovery_mode = self.recovery_mode = 0
+            if verbose: print "recovery not needed"
+        else:
+            recovery_mode = self.recovery_mode = 1
+            if verbose: print "FS0 RECOVERY MODE LOAD!"
+        resultdb = Database0()
+        resultdb.is_scratch = forscratch
+        commands = self.get_initstatements()
+        #commands = parser.DoParse1(initstatements)
+        for command in commands:
+            if verbose: print "fs0 evals", command
+            command.relbind(resultdb)
+            command.eval()
+        for name in resultdb.relations():
+            if verbose: print "fs0 loads rel", name
+            rel = resultdb[name]
+            if rel.is_view:
+                # don't need to load views
+                continue
+            rel.set_empty()
+            try:
+                data = self.get_relation(name)
+            except StorageError, detail:
+                raise StorageError, "load failure %s: %s" % (name, detail)
+            attsin = tuple(data.attributes())
+            attsout = tuple(rel.attributes())
+            if attsin!=attsout:
+                raise StorageError, "rel %s: atts %s don't match %s" % (
+                   name, attsin, attsout)
+            rel.add_tuples( data.rows() )
+            # in sync!
+            rel.touched = 0
+        # db in sync
+        resultdb.touched = 0
+        # do recovery, if needed
+        if recovery_mode:
+            if verbose: print "fs0 recovering from logfile", logfile
+            # restart the log file only if db is not scratch
+            restart = not forscratch
+            Log = DB_Logger(logfile, blogfile)
+            if verbose: Log.verbose=1
+            Log.recover(resultdb, restart)
+            # do a checkpoint
+            self.recovery_mode = 0
+            if restart and not forscratch:
+                Log.shutdown()
+                Log = None
+                del_file(logfile)
+                if verbose: print "FS0: dumping database"
+                self.dump(resultdb)
+                Log = resultdb.log = DB_Logger(logfile, blogfile)
+                Log.startup()
+        elif not forscratch:
+            Log = DB_Logger(logfile, blogfile)
+            Log.startup()
+            resultdb.log = Log
+        return resultdb
+
+    def relfilename(self, name):
+        #return "%s/%s.grl" % (self.directory, name)
+        return os.path.join(self.directory, name+".grl")
+
+    def backup_relfilename(self, name):
+        #return "%s/%s.brl" % (self.directory, name)
+        return os.path.join(self.directory, name+".brl")
+
+    def relfile(self, name, mode="rb"):
+        if self.recovery_mode:
+            return self.getfile_fallback(
+         self.backup_relfilename(name), self.relfilename(name), mode)
+        else:
+            name = self.relfilename(name)
+            return open(name, mode)
+
+    def getfile_fallback(self, first, second, mode):
+        try:
+            return open(first, mode)
+        except:
+            return open(second, mode)
+
+    def get_relation(self, name):
+        f = self.relfile(name, "rb")
+        rel = self.relation_implementation(())
+        try:
+            rel.load(f)
+        except StorageError:
+            if self.recovery_mode:
+                f = open(self.relfilename(name), "rb")
+                rel.load(f)
+            else:
+                raise StorageError, \
+   "fs: could not unpack backup rel file or rel file in recovery mode: "+name
+        return rel
+
+    def dbfilename(self):
+        #return "%s/%s.gfd" % (self.directory, self.dbname)
+        return os.path.join(self.directory, self.dbname+".gfd")
+
+    def backup_dbfilename(self):
+        #return "%s/%s.bfd" % (self.directory, self.dbname)
+        return os.path.join(self.directory, self.dbname+".bfd")
+
+    def logfilename(self):
+        #return "%s/%s.gfl" % (self.directory, self.dbname)
+        return os.path.join(self.directory, self.dbname+".gfl")
+
+    def backup_logfilename(self):
+        #return "%s/%s.glb" % (self.directory, self.dbname)
+        return os.path.join(self.directory, self.dbname+".glb")
+
+    def get_initstat_file(self, mode):
+        if self.recovery_mode:
+            return self.getfile_fallback(
+             self.backup_dbfilename(), self.dbfilename(), mode)
+        else:
+            return open(self.dbfilename(), mode)
+
+    def get_initstatements(self):
+        f = self.get_initstat_file("rb")
+        if self.verbose:
+            print "init statement from file", f
+        try:
+            data = checksum_undump(f)
+        except StorageError:
+            if self.recovery_mode:
+                f = open(self.dbfilename, "rb")
+                data = checksum_undump(f)
+            else:
+                raise StorageError, \
+   "could not unpack ddf backup or ddf file in recovery mode: "+self.dbname
+        f.close()
+        from sqlsem import deserialize
+        stats = map(deserialize, data)
+        return stats
+
+    def dump(self, db):
+        """perform a checkpoint (no active transactions!)"""
+        # db should be non-shadowing db
+        # first thing: back up the log
+        backup_file(self.logfilename(), self.backup_logfilename())
+        verbose = self.verbose
+        if verbose: print "fs0: checkpointing db"
+        if db.is_scratch or db.readonly:
+            # don't need to do anything.
+            if verbose: print "fs0: scratch or readonly, returning"
+            return
+        log = db.log
+        if log:
+            log.commit()
+            if verbose:
+                print "DEBUG LOG TRACE"
+                log.dump()
+            log.shutdown()
+        if db.touched:
+            if verbose: print "fs0: db touched, backing up ddf file"
+            backup_file(self.dbfilename(),
+                        self.backup_dbfilename())
+        relations = db.relations()
+        for r in relations:
+            rel = db[r]
+            #print r
+            if rel.touched:
+                if verbose: print "fs0: backing up touched rel", r
+                backup_file(self.relfilename(r),
+                            self.backup_relfilename(r))
+        for r in relations:
+            if verbose: print "fs0: dumping relations now"
+            self.dumprelation(r, db[r])
+        if verbose: print "fs0: dumping datadefs now"
+        self.dumpdatadefs(db)
+        # del of logfile signals successful commit.
+        if verbose: print "fs0: successful dump, deleting log file"
+        logfilename = self.logfilename()
+        blogfilename = self.backup_logfilename()
+        del_file(logfilename)
+        del_file(blogfilename)
+        if db.touched:
+            if verbose: print "fs0: deleting backup ddf file"
+            del_file(self.backup_dbfilename())
+            db.touched = 0
+        for r in relations:
+            rel = db[r]
+            if rel.touched:
+                if verbose: print "fs0: deleting rel backup", r
+                del_file(self.backup_relfilename(r))
+            rel.touched = 0
+        if verbose: print "fs0: restarting db log"
+        log = db.log = DB_Logger(logfilename, blogfilename)
+        log.startup()
+        if verbose: print "fs0: dump complete"
+        self.recovery_mode = 0
+
+    def dumprelation(self, name, rel, force=0):
+        """set force to ignore the "touch" flag."""
+        # ignore self.backup_mode
+        if (force or rel.touched) and not rel.is_view:
+            fn = self.relfilename(name)
+            if self.verbose:
+                print "dumping touched rel", name, "to", fn
+            f = open(fn, "wb")
+            rel.dump(f)
+
+    def dumpdatadefs(self, db, force=0):
+        """set force to ignore the touch flag"""
+        # ignore self.backup_mode
+        if not (force or db.touched): return
+        #from marshal import dump, dumps
+        fn = self.dbfilename()
+        f = open(fn, "wb")
+        datadefs = db.getdatadefs()
+        from sqlsem import serialize
+        datadefsd = map(serialize, datadefs)
+        #for (defn, ser) in map(None, datadefs, datadefsd):
+            #print defn
+            #print ser
+            #dumps(ser)  ### debug test
+        checksum_dump(datadefsd, f)
+        f.close()
+
+class Relation0:
+    """quick and dirty in core relation representation.
+         self.tuples contains tuples or 0 if erased.
+       tuples must not move (to preserve indices)
+       unless indices regenerate.
+    """
+
+    is_view = 0 # Relation0 is not a view
+
+    def __init__(self, attribute_names, tuples=None, filter=None):
+        # filter, if given, screens tuples on insertion (see add_tuples)
+        from sqlsem import kjbuckets
+        self.indices = kjbuckets.kjGraph()
+        self.index_list = []
+        self.attribute_names = attribute_names
+        if tuples is None:
+            tuples = []
+        self.filter = filter
+        self.set_empty()
+        self.add_tuples(tuples)
+        # indices map attname > indices containing att
+        # relation to shadow and log (if non-null)
+        self.log = None
+        self.name = None # anonymous by default
+        self.is_shadow = 0
+        self.touched = 0
+
+    def shadow(self, otherrelation, log, name, inshadowdb):
+        """return structural replica of otherrelation (as self)
+
+           for non-updatable relation (eg, view) may return otherrelation"""
+        if otherrelation.is_view:
+            # for now, assume VIEWS CANNOT BE UPDATED
+            return otherrelation
+        self.is_shadow = 1
+        self.shadow_of_shadow = otherrelation.is_shadow
+        self.log = log
+        self.name = name
+        # don't make any updates permanent if set.
+        self.tuples = otherrelation.tuples[:]
+        self.attribute_names = otherrelation.attribute_names
+        self.filter = otherrelation.filter
+        for index in otherrelation.index_list:
+            copy = index.copy()
+            name = copy.name
+            self.add_index(copy, recordtuples=0)
+            # record in shadowdb, but don't log it
+            inshadowdb.add_index(name, copy)
+            #inshadowdb.add_datadef(name, copy, logit=0)
+        self.touched = otherrelation.touched
+        return self
+
+    def unshadow(self):
+        """make self into a replacement for shadowed, return self."""
+        if self.is_shadow:
+            self.log = None
+            self.is_shadow = self.shadow_of_shadow
+        return self
+
+    def dump(self, file):
+        """write a checksummed serialization of the live rows to file.
+           Erased slots (stored as int 0) are skipped; the attribute
+           tuple is appended as the final list element (see load)."""
+        attributes = tuple(self.attributes())
+        rows = self.rows()
+        newrows = rows[:]
+        count = 0
+        tt = type
+        from types import IntType
+        for i in xrange(len(rows)):
+            this = rows[i]
+            if this is not None and tt(this) is not IntType:
+                newrows[count] = rows[i].dump(attributes)
+                count = count + 1
+        newrows = newrows[:count]
+        newrows.append(attributes)
+        checksum_dump(newrows, file)
+
+    def load(self, file):
+        """checksum must succeed."""
+        rows = checksum_undump(file)
+        # the last element holds the attribute names (appended by dump)
+        attributes = rows[-1]
+        self.attribute_names = attributes
+        rows = rows[:-1]
+        from sqlsem import kjbuckets
+        undump = kjbuckets.kjUndump
+        for i in xrange(len(rows)):
+            rows[i] = undump(attributes, rows[i])
+        self.set_empty()
+        self.add_tuples(rows)
+        # in sync with disk copy!
+        self.touched = 0
+
+    def add_index(self, index, recordtuples=1):
+        """unset recordtuples if the index is initialized already."""
+        # does not "touch" the relation
+        index_list = self.index_list
+        indices = self.indices
+        atts = index.attributes()
+        for a in atts:
+            indices[a] = index
+        if recordtuples:
+            (tuples, seqnums) = self.rows(1)
+            index.clear()
+            if tuples:
+                index.add_tuples(tuples, seqnums)
+        index_list.append(index)
+
+    def drop_index(self, index):
+        # does not "touch" the relation
+        name = index.name
+        if verbosity:
+            print "rel.drop_index", index
+            print "...", self.indices, self.index_list
+        indices = self.indices
+        for a in index.attributes():
+            # contorted since one index be clone of the other.
+            aindices = indices.neighbors(a)
+            for ind in aindices:
+                if ind.name == name:
+                    indices.delete_arc(a, ind)
+                    theind = ind
+        # the (non-clone) index ought to have been found above...
+        # NOTE(review): if no index matched, theind is unbound here and
+        # this raises NameError rather than a descriptive error.
+        self.index_list.remove(theind)
+
+    def choose_index(self, attributes):
+        """choose an index including subset of attributes or None"""
+        from sqlsem import kjbuckets
+        kjSet = kjbuckets.kjSet
+        atts = kjSet(attributes)
+        #print "choosing index", atts
+        indices = (atts * self.indices).values()
+        choice = None
+        for index in indices:
+            indexatts = index.attributes()
+            #print "index atts", indexatts
+            iatts = kjSet(indexatts)
+            if iatts.subset(atts):
+                if choice is None:
+                    #print "chosen", index.name
+                    choice = index
+                    lchoice = len(choice.attributes())
+                else:
+                    # prefer unique indices, then wider attribute coverage
+                    if index.unique or lchoice<len(indexatts):
+                        choice = index
+                        lchoice = len(choice.attributes())
+        return choice
+
+    def __repr__(self):
+        # render the relation as an aligned ascii table:
+        # header row, "=" separator, then one padded line per tuple.
+        rows = self.rows()
+        atts = self.attributes()
+        list_rep = [list(atts)]
+        for r in rows:
+            rlist = []
+            for a in atts:
+                try:
+                    elt = r[a]
+                except KeyError:
+                    elt = "NULL"
+                else:
+                    elt = str(elt)
+                rlist.append(elt)
+            list_rep.append(rlist)
+        # compute maxen for formatting
+        maxen = [0] * len(atts)
+        for i in xrange(len(atts)):
+            for l in list_rep:
+                maxen[i] = max(maxen[i], len(l[i]))
+        for i in xrange(len(atts)):
+            mm = maxen[i]
+            for l in list_rep:
+                old = l[i]
+                l[i] = old + (" " * (mm-len(old)))
+        from string import join
+        for i in xrange(len(list_rep)):
+            list_rep[i] = join(list_rep[i], " | ")
+        first = list_rep[0]
+        list_rep.insert(1, "=" * len(first))
+        return join(list_rep, "\n")
+
+    def irepr(self):
+        # repr of the relation followed by each of its indices
+        List = [self] + list(self.index_list)
+        List = map(str, List)
+        from string import join
+        return join(List, "\n")
+
+    def set_empty(self):
+        # discard all tuples and reset every index
+        self.tuples = []
+        for index in self.index_list:
+            index.clear()
+
+    def drop_indices(self, db):
+        # unregister all indices from db and forget them locally
+        for index in self.index_list:
+            name = index.name
+            db.drop_datadef(name)
+            db.drop_index(name)
+        self.index_list = []
+        from sqlsem import kjbuckets
+        self.indices = kjbuckets.kjGraph()
+
+    def regenerate_indices(self):
+        # rebuild every index from the current live rows
+        (tuples, seqnums) = self.rows(1)
+        #self.tuples = tuples
+        for index in self.index_list:
+            index.clear()
+            index.add_tuples(tuples, seqnums)
+
+    def add_tuples(self, tuples):
+        # append tuples (after filtering) and index them by position
+        if not tuples: return
+        tuples = filter(self.filter, tuples)
+        oldtuples = self.tuples
+        first = len(oldtuples)
+        oldtuples[first:] = list(tuples)
+        last = len(oldtuples)
+        for index in self.index_list:
+            index.add_tuples(tuples, xrange(first,last))
+        self.touched = 1
+
+    def attributes(self):
+        return self.attribute_names
+
+    def rows(self, andseqnums=0):
+        # return live rows, skipping erased slots (stored as int 0);
+        # optionally also return their sequence numbers.
+        tups = self.tuples
+        # short cut
+        if 0 not in tups:
+            if andseqnums:
+                return (tups, xrange(len(tups)))
+            else:
+                return tups
+        tt = type
+        from types import IntType
+        result = list(self.tuples)
+        if andseqnums: seqnums = result[:]
+        count = 0
+        for i in xrange(len(result)):
+            t = result[i]
+            if tt(t) is not IntType:
+                result[count] = t
+                if andseqnums: seqnums[count] = i
+                count = count+1
+        result = result[:count]
+        if andseqnums:
+            return (result, seqnums[:count])
+        else:
+            return result
+
+    def erase_tuples(self, seqnums):
+        # mark slots erased (0) without moving surviving tuples,
+        # so positions recorded in indices stay valid.
+        #print "et seqnums", seqnums
+        if not seqnums: return
+        tups = self.tuples
+        # order important! indices first!
+        for index in self.index_list:
+            index.erase_tuples(seqnums, tups)
+        for i in seqnums:
+            #print "deleting", i
+            tups[i] = 0
+        #print self
+        self.touched = 1
+
+    def reset_tuples(self, tups, seqnums):
+        # replace tuples in place at the given slots (update operation)
+        # KISS for indices, maybe optimize someday...
+        if not tups: return
+        mytups = self.tuples
+        for index in self.index_list:
+            index.erase_tuples(seqnums, mytups)
+        for i in xrange(len(seqnums)):
+            seqnum = seqnums[i]
+            mytups[seqnum] = tups[i]
+        for index in self.index_list:
+            index.add_tuples(tups, seqnums)
+        self.touched = 1
+
+# should views be here?
+
+class View(Relation0):
+    """view object, acts like relation, with addl operations."""
+    touched = 0
+    is_view = 1
+    is_shadow = 0
+
+    ### must fix namelist!
+
+    def __init__(self, name, namelist, selection, indb):
+        """set namelist to None for implicit namelist"""
+        self.name = name
+        self.namelist = namelist
+        self.selection = selection
+        # attempt a relbind, no outer bindings!
+        self.relbind(indb, {})
+        self.cached_rows = None
+        self.translate = None
+
+    def __repr__(self):
+        return "view %s as %s" % (self.name, self.selection)
+
+    irepr = __repr__
+
+    def uncache(self):
+        # invalidate the memoized row list (see rows())
+        self.cached_rows = None
+
+    def UNDEFINED_OP_FOR_VIEW(*args, **kw):
+        raise ValueError, "operation explicitly undefined for view object"
+
+    # mutating / storage operations inherited from Relation0 are
+    # explicitly disabled for views:
+    shadow = dump = load = add_index = drop_index = set_empty = \
+    add_tuples = erase_tuples = reset_tuples = UNDEFINED_OP_FOR_VIEW
+
+    def ignore_op_for_view(*args, **kw):
+        """ignore this op when applied to view"""
+        pass
+
+    drop_indices = regenerate_indices = ignore_op_for_view
+
+    def choose_index(s, a):
+        """no indices on views (might change this?)"""
+        return None
+
+    def relbind(self, db, atts):
+        """bind self to db, ignore atts"""
+        name = self.name
+        selection = self.selection
+        selection = self.selection = selection.relbind(db)
+        namelist = self.namelist
+        if namelist is not None:
+            from sqlsem import kjbuckets
+            target_atts = selection.attributes()
+            if len(namelist)!=len(target_atts):
+                # NOTE(review): legacy Python 2 string exception
+                raise "select list and namelist don't match in %s"%name
+            pairs = map(None, namelist, target_atts)
+            self.translate = kjbuckets.kjGraph(pairs)
+        return self
+
+    def attributes(self):
+        # explicit namelist wins; otherwise delegate to the selection
+        namelist = self.namelist
+        if self.namelist is None:
+            return self.selection.attributes()
+        return namelist
+
+    def rows(self, andseqs=0):
+        # evaluate the selection once and cache; callers get copies
+        cached_rows = self.cached_rows
+        if cached_rows is None:
+            cached_rows = self.cached_rows = self.selection.eval().rows()
+            if self.namelist is not None:
+                # translate the attribute names
+                translate = self.translate
+                for i in range(len(cached_rows)):
+                    cached_rows[i] = cached_rows[i].remap(translate)
+        if andseqs:
+            return (cached_rows[:], range(len(cached_rows)))
+        else:
+            return cached_rows[:]
+
+class Index:
+    """Index for tuples in relation.  Tightly bound to relation rep."""
+
+    ### should add "unique index" and check enforce uniqueness...
+
+    def __init__(self, name, attributes, unique=0):
+        self.unique = unique
+        self.name = name
+        self.atts = tuple(attributes)
+        # values > tuples
+        # index maps marshalled key -> list of tuples;
+        # dseqnums maps the same key -> list of sequence numbers
+        self.index = {}
+        self.dseqnums = {}
+
+    def __repr__(self):
+        un = ""
+        if self.unique: un="UNIQUE "
+        return "%sindex %s on %s" % (un, self.name, self.atts)
+
+    def copy(self):
+        """make a fast structural copy of self"""
+        result = Index(self.name, self.atts, unique=self.unique)
+        rindex = result.index
+        rdseqnums = result.dseqnums
+        myindex = self.index
+        mydseqnums = self.dseqnums
+        # shallow-copy each bucket list so the copy can mutate freely
+        for k in myindex.keys():
+            rindex[k] = myindex[k][:]
+        for k in mydseqnums.keys():
+            rdseqnums[k] = mydseqnums[k][:]
+        return result
+
+    def attributes(self):
+        return self.atts
+
+    def matches(self, tuple, translate=None):
+        """return (tuples, seqnums) for tuples matching tuple
+           (with possible translations"""
+        if translate:
+            tuple = translate * tuple
+        atts = self.atts
+        dump = tuple.dump(atts)
+        index = self.index
+        if index.has_key(dump):
+            return (index[dump], self.dseqnums[dump])
+        else:
+            return ((), ())
+
+    def clear(self):
+        self.index = {}
+        self.dseqnums = {}
+
+    def add_tuples(self, tuples, seqnums):
+        # insert tuples (with their sequence numbers), keyed on the
+        # marshalled projection over the indexed attributes
+        unique = self.unique
+        atts = self.atts
+        index = self.index
+        dseqnums = self.dseqnums
+        test = index.has_key
+        for i in xrange(len(tuples)):
+            tup = tuples[i]
+            seqnum = seqnums[i]
+            dump = tup.dump(atts)
+            #print self.name, dump
+            if test(dump):
+                bucket = index[dump]
+                #print "self", self
+                #print "unique", unique
+                #print "bucket", bucket
+                if unique and bucket:
+                    raise StorageError, "uniqueness violation: %s %s" %(
+                      dump, self)
+                bucket.append(tup)
+                dseqnums[dump].append(seqnum)
+            else:
+                index[dump] = [tup]
+                dseqnums[dump] = [seqnum]
+
+    def erase_tuples(self, seqnums, all_tuples):
+        # all_tuples must be internal rel tuple list
+        atts = self.atts
+        index = self.index
+        dseqnums = self.dseqnums
+        for seqnum in seqnums:
+            tup = all_tuples[seqnum]
+            dump = tup.dump(atts)
+            index[dump].remove(tup)
+            dseqnums[dump].remove(seqnum)
+
+class shadow_dict:
+    """shadow dictionary. defer & remember updates."""
+    verbose = verbosity
+    def __init__(self, shadowing, value_transform=None):
+        # shadowed: the real dict; shadow: the working copy all reads
+        # and writes go to until commit().
+        self.shadowed = shadowing
+        shadow = self.shadow = {}
+        self.touched = {}
+        for key in shadowing.keys():
+            shadow[key] = shadowing[key]
+        self.value_transform = value_transform
+        # defeats inheritance! careful!
+        self.values = shadow.values
+        self.items = shadow.items
+        self.keys = shadow.keys
+        self.has_key = shadow.has_key
+
+    def is_shadowed(self, name):
+        return self.touched.has_key(name)
+
+    def __len__(self):
+        return len(self.shadow)
+
+    def commit(self, verbose=0):
+        """apply updates to shadowed."""
+        import sys
+        verbose = verbose or self.verbose
+        if self.touched:
+            shadowed = self.shadowed
+            shadow = self.shadow
+            value_transform = self.value_transform
+            # clear the target, then rewrite it from the shadow copy,
+            # applying value_transform (if any) to each value
+            keys = shadowed.keys()
+            if verbose:
+                print "shadowdict oldkeys", keys
+            for k in keys:
+                del shadowed[k]
+            keys = shadow.keys()
+            if verbose:
+                print "shadowdict newkeys", keys
+            for k in shadow.keys():
+                value = shadow[k]
+                if value_transform is not None:
+                    try:
+                        value = value_transform(value)
+                    except:
+                        # NOTE(review): legacy Python 2 string exception
+                        raise "transform fails", (sys.exc_type, sys.exc_value, k, value)
+                shadowed[k] = value
+            self.touched = {}
+
+    def __getitem__(self, key):
+        return self.shadow[key]
+
+    def __setitem__(self, key, item):
+        # keys must be strings, values must not be None
+        from types import StringType
+        if type(key) is not StringType:
+            raise "nonstring", key
+        if item is None:
+            raise "none set", (key, item)
+        self.touched[key] = 1
+        self.shadow[key] = item
+
+    def __delitem__(self, key):
+        self.touched[key] = 1
+        del self.shadow[key]
+
+# stored mutations on relations
+class Add_Tuples:
+    """stored rel.add_tuples(tuples)"""
+    def __init__(self, name):
+        self.to_rel = name
+        self.indb = None
+    def initargs(self):
+        return (self.to_rel,)
+    def set_data(self, tuples, rel):
+        """store self.data as tuple with tuple[-1] as to_rel, rest data"""
+        # each tuple is dumped against the relation's attributes so the
+        # operation can be marshalled into the log
+        attributes = tuple(rel.attributes())
+        ltuples = len(tuples)
+        data = list(tuples)
+        for i in xrange(ltuples):
+            tdata = tuples[i].dump(attributes)
+            data[i] = tdata
+        self.data = tuple(data)
+    def __repr__(self):
+        from string import join
+        datarep = map(repr, self.data)
+        datarep = join(datarep, "\n  ")
+        return "add tuples to %s\n  %s\n\n" % (self.to_rel, datarep)
+    def marshaldata(self):
+        # data is already marshal-friendly (tuple of dumped tuples)
+        return self.data
+    def demarshal(self, data):
+        self.data = data
+    def relbind(self, db):
+        self.indb = db
+    def eval(self, dyn=None):
+        """apply operation to db"""
+        db = self.indb
+        data = self.data
+        name = self.to_rel
+        rel = db[name]
+        attributes = tuple(rel.attributes())
+        tuples = list(data)
+        from sqlsem import kjbuckets
+        undump = kjbuckets.kjUndump
+        for i in xrange(len(tuples)):
+            tuples[i] = undump(attributes, tuples[i])
+        rel.add_tuples(tuples)
+
+class Erase_Tuples(Add_Tuples):
+    """stored rel.erase_tuples(seqnums)"""
+    def set_data(self, seqnums, rel):
+        # only the sequence numbers need to be recorded; rel is unused
+        seqnums = list(seqnums)
+        self.data = tuple(seqnums)
+    def __repr__(self):
+        return "Erase seqnums in %s\n  %s\n\n" % (self.to_rel, self.data)
+    def eval(self, dyn=None):
+        db = self.indb
+        seqnums = self.data
+        name = self.to_rel
+        rel = db[name]
+        rel.erase_tuples(seqnums)
+
+class Reset_Tuples(Add_Tuples):
+    """stored rel.reset_tuples(tups, seqnums)"""
+    def set_data(self, tups, seqnums, rel):
+        # store (dumped tuples, sequence numbers) as a marshallable pair
+        attributes = tuple(rel.attributes())
+        dtups = list(tups)
+        for i in xrange(len(dtups)):
+            dtups[i] = dtups[i].dump(attributes)
+        self.data = (tuple(dtups), tuple(seqnums))
+    def __repr__(self):
+        (dtups, seqnums) = self.data
+        pairs = map(None, seqnums, dtups)
+        from string import join
+        datarep = map(repr, pairs)
+        datarep = join(datarep, "  \n")
+        return "Reset tuples in %s\n  %s\n\n" % (self.to_rel, datarep)
+    def eval(self, dyn=None):
+        # rehydrate the dumped tuples and replay the reset on the relation
+        db = self.indb
+        (dtups, seqnums) = self.data
+        tups = list(dtups)
+        rel = db[self.to_rel]
+        attributes = tuple(rel.attributes())
+        from sqlsem import kjbuckets
+        undump = kjbuckets.kjUndump
+        for i in xrange(len(dtups)):
+            tups[i] = undump(attributes, dtups[i])
+        rel.reset_tuples(tups, seqnums)
+
+# Log entry tags: markers written to the recovery log to bracket
+# transaction lifecycles during recovery replay.
+START = "START"
+COMMIT = "COMMIT"
+ABORT = "ABORT"
+UNREADABLE = "UNREADABLE"
+
+class Transaction_Logger:
+    """quick and dirty Log implementation per transaction."""
+    verbose = verbosity
+
+    def __init__(self, db_log, transactionid, is_scratch=0):
+        self.db_log = db_log
+        self.transactionid = transactionid
+        # ignore all operations if set
+        self.is_scratch = is_scratch
+        self.dirty = 0
+        self.deferred = []
+
+    def reset(self):
+        self.deferred = []
+
+    def __repr__(self):
+        return "Transaction_Logger(%s, %s, %s)" % (
+           self.db_log, self.transactionid, self.is_scratch)
+
+    def log(self, operation):
+        verbose = self.verbose
+        tid = self.transactionid
+        if not self.is_scratch:
+            self.deferred.append(operation)
+            if verbose:
+                print "tid logs", tid, operation
+
+    def flush(self):
+        verbose = self.verbose
+        if not self.is_scratch:
+            tid = self.transactionid
+            deferred = self.deferred
+            self.deferred = []
+            db_log = self.db_log
+            if db_log:
+                for operation in deferred:
+                    db_log.log(operation, tid)
+            self.dirty = 1
+        elif verbose:
+            print "scratch log ignored", tid, operation
+
+    def commit(self, verbose=0):
+        verbose = self.verbose or verbose
+        tid = self.transactionid
+        if verbose: print "committing trans log", tid
+        if self.is_scratch:
+            if verbose:
+                print "scratch commit ignored", tid
+            return
+        if not self.dirty:
+            if verbose:
+                print "nondirty commit", tid
+            return
+        self.flush()
+        db_log = self.db_log
+        db_log.commit(verbose, tid)
+        if verbose:
+            print "transaction is considered recoverable", tid
+
+class DB_Logger:
+    """quick and dirty global db logger."""
+    verbose = verbosity
+    is_scratch = 0
+
+    def __init__(self, filename, backupname):
+        # the log file handle is opened lazily (see restart/log)
+        self.filename = filename
+        # backup name is never kept open: existence indicates log in use.
+        self.backupname = backupname
+        self.file = None
+        self.dirty = 0
+        if self.verbose:
+            print id(self), "created DB_Logger on", self.filename
+
+    def __repr__(self):
+        # identify the logger by its log file path
+        return "DB_Logger(%s)" % self.filename
+
+    def startup(self):
+        """start with a fresh (deleted) log; the file reopens lazily."""
+        if self.verbose:
+            print id(self), "preparing", self.filename
+        # open happens automagically
+        #self.file = open(self.filename, "wb")
+        self.clear()
+        self.dirty = 0
+
+    def shutdown(self):
+        """close the log file handle if it is open."""
+        if self.verbose:
+            print id(self), "shutting down log", self.filename
+        file = self.file
+        if file:
+            file.close()
+        self.file = None
+
+    def clear(self):
+        """close the handle and delete the log file itself."""
+        if self.verbose:
+            print id(self), "clearing"
+        self.shutdown()
+        del_file(self.filename)
+
+    def restart(self):
+        """(re)open the log for append and touch the backup marker file."""
+        if self.verbose:
+            print id(self), "restarting log file", self.filename
+        if self.file is not None:
+            self.file.close()
+        self.file = open(self.filename, "ab")
+        # create the backup marker whose existence flags the log as in use
+        dummy = open(self.backupname, "ab")
+        dummy.close()
+        self.dirty = 0
+
+    def clear_log_file(self):
+        """close and remove both the log file and its backup marker."""
+        if self.verbose:
+            print id(self), "clearing logfile", self.filename
+        if self.file is not None:
+            self.file.close()
+            self.file = None
+        del_file(self.filename)
+        del_file(self.backupname)
+        self.dirty = 0
+
+    def log(self, operation, transactionid=None):
+        """transactionid of None means no transaction: immediate."""
+        file = self.file
+        if file is None:
+            # lazily (re)open the log file on first use
+            self.restart()
+            file = self.file
+        verbose = self.verbose
+        from sqlsem import serialize
+        serial = serialize(operation)
+        # each record is a checksummed (transactionid, serialized op) pair
+        data = (transactionid, serial)
+        if verbose:
+            print id(self), "logging:", transactionid
+            print operation
+        checksum_dump(data, file)
+        self.dirty = 1
+
+    def commit(self, verbose=0, transactionid=None):
+        """add commit, if appropriate, flush."""
+        verbose = self.verbose or verbose
+        if not self.dirty and transactionid is None:
+            if verbose: print "commit not needed", transactionid
+            return
+        elif verbose:
+            print "attempting commit", transactionid
+        if transactionid is not None:
+            self.log( COMMIT, transactionid )
+            if verbose: print "committed", transactionid
+        if verbose: print "flushing", self.filename
+        self.file.flush()
+        self.dirty = 0
+
+    def recover(self, db, restart=1):
+        import sys
+        verbose = self.verbose
+        filename = self.filename
+        if verbose:
+            print "attempting recovery from", self.filename
+        file = self.file
+        if file is not None:
+            if verbose: print "closing file"
+            self.file.close()
+            self.file = None
+        if verbose:
+            print "opens should generate an error if no recovery needed"
+        try:
+            file = open(filename, "rb")
+            file2 = open(self.backupname, "rb")
+        except:
+            if verbose:
+                print "no recovery needed:", filename
+                print sys.exc_type, sys.exc_value
+            sys.exc_traceback = None
+            return
+        file2.close()
+        if verbose: print "log found, recovering from", filename
+        records = self.read_records(file)
+        if verbose: print "scan for commit records"
+        commits = {}
+        for (i, (tid, op)) in records:
+            if op==COMMIT:
+                if verbose: print "transaction", tid, "commit at", i
+                commits[tid] = i
+            elif verbose:
+                print i, tid, "operation\n", op
+        if verbose: print commits, "commits total"
+        if verbose: print "applying commited operations, in order"
+        committed = commits.has_key
+        from types import StringType
+        for (i, (tid, op)) in records:
+            if tid is None or (committed(tid) and commits[tid]>i):
+                if type(op) is StringType:
+                    if verbose:
+                        print "skipping marker", tid, op
+                if verbose:
+                    print "executing for", tid, i
+                    print op
+                #### Note: silently eat errors unless verbose
+                ### (eg in case of table recreation...)
+                ### There should be a better way to do this!!!
+                import sys
+                try:
+                    op.relbind(db)
+                    op.eval()
+                except:
+                    if verbose:
+                        print "error", sys.exc_type, sys.exc_value
+                        print "binding or evaluating logged operation:"
+                        print op
+            elif verbose:
+                print "uncommitted operation", tid, i
+                op
+        if verbose:
+            print "recovery successful: clearing log file"
+        self.clear()
+        if restart:
+            if verbose:
+                print "recreating empty log file"
+            self.startup()
+
+    def read_records(self, file):
+        """return log record as (index, (tid, op)) list"""
+        verbose = self.verbose
+        if verbose: print "reading log records to error"
+        import sys
+        records = {}
+        from sqlsem import deserialize
+        count = 0
+        while 1:
+            try:
+                data = checksum_undump(file)
+            except:
+                if verbose:
+                    print "record read terminated with error", len(records)
+                    print sys.exc_type, sys.exc_value
+                break
+            (transactionid, serial) = data
+            operation = deserialize(serial)
+            records[count] = (transactionid, operation)
+            if verbose:
+                print count, ": read for", transactionid
+                print operation
+            count = count+1
+        if verbose: print len(records), "records total"
+        records = records.items()
+        records.sort()
+        return records
+
+    def dump(self):
+        verbose = self.verbose
+        self.shutdown()
+        print "dumping log"
+        self.verbose = 1
+        try:
+            file = open(self.filename, "rb")
+        except:
+            print "DUMP FAILED, cannot open", self.filename
+        else:
+            self.read_records(file)
+        self.verbose = verbose
+        self.restart()

Deleted: zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py
===================================================================
--- zope.rdb/trunk/src/zope/rdb/gadfly/sqlbind.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -1,623 +0,0 @@
-"""rule bindings for sql grammar."""
-
-def elt0(list, context):
-    """return first member of reduction"""
-    return list[0]
-
-def elt1(list, context):
-    """return second member"""
-    return list[1]
-
-def elt2(list, context):
-    return list[2]
-
-def returnNone(list, context):
-    return None
-
-def stat1(list, context):
-    """return list of len 1 of statements"""
-    return list
-
-#def statn(list, context):
-#    """return a list of statement reductions"""
-#    [stat, semi, statlist] = list
-#    statlist.insert(0, stat)
-#    return statlist
-
-def thingcommalist(l, c):
-    [thing, comma, list] = l
-    list.insert(0, thing)
-    return list
-
-def listcommathing(l, c):
-    [list, comma, thing] = l
-    list.append(thing)
-    return list
-
-statn = thingcommalist
-selstat = elt0
-insstat = elt0
-createtablestat = elt0
-droptablestat = elt0
-delstat = elt0
-updatestat = elt0
-createindexstat = elt0
-dropindexstat = elt0
-createviewstat = elt0
-dropviewstat = elt0
-
-# drop view statement stuff
-def dropview(l, c):
-    [drop, view, name] = l
-    from sqlsem import DropView
-    return DropView(name)
-
-# create view statement stuff
-def createview(l, c):
-    [create, view, name, namelist, as, selection] = l
-    from sqlsem import CreateView
-    return CreateView(name, namelist, selection)
-
-optnamelist0 = returnNone
-optnamelistn = elt1
-
-# drop index statement stuff
-def dropindex(l, c):
-    [drop, index, name] = l
-    from sqlsem import DropIndex
-    return DropIndex(name)
-
-# create index statement stuff
-def createindex(l, c):
-    [create, index, name, on, table, op, namelist, cp] = l
-    from sqlsem import CreateIndex
-    return CreateIndex(name, table, namelist)
-
-def createuniqueindex(l, c):
-    [create, unique, index, name, on, table, op, namelist, cp] = l
-    from sqlsem import CreateIndex
-    return CreateIndex(name, table, namelist, unique=1)
-
-names1 = stat1
-namesn = listcommathing
-
-# update statement stuff
-
-def update(l, c):
-    [upd, name, set, assns, condition] = l
-    from sqlsem import UpdateOp
-    return UpdateOp(name, assns, condition)
-
-def assn(l, c):
-    [col, eq, exp] = l
-    return (col, exp)
-
-def assn1(l, c):
-    [ (col, exp) ] = l
-    from sqlsem import TupleCollector
-    result = TupleCollector()
-    result.addbinding(col, exp)
-    return result
-
-def assnn(l, c):
-    [ result, comma, (col, exp) ] = l
-    result.addbinding(col, exp)
-    return result
-
-# delete statement stuff
-
-def deletefrom(l, c):
-    [delete, fromkw, name, where] = l
-    from sqlsem import DeleteOp
-    return DeleteOp(name, where)
-
-# drop table stuff
-
-def droptable(l, c):
-    [drop, table, name] = l
-    from sqlsem import DropTable
-    return DropTable(name)
-
-# create table statement stuff
-
-def createtable(list, context):
-    [create, table, name, p1, colelts, p2] = list
-    from sqlsem import CreateTable
-    return CreateTable(name, colelts)
-
-colelts1 = stat1
-coleltsn = listcommathing
-#def coleltsn(list, c):
-#    [c1, cc, ce] = list
-#    c1.append(ce)
-#    return c1
-
-coleltid = elt0
-coleltconstraint = elt0
-
-def coldef(l, c):
-    [colid, datatype, default, constraints] = l
-    from sqlsem import ColumnDef
-    return ColumnDef(colid, datatype, default, constraints)
-
-optdef0 = returnNone
-optcolconstr0 = returnNone
-stringtype = exnumtype = appnumtype = integer = float = varchar = elt0
-varcharn = elt0
-
-# insert statement stuff
-
-def insert1(l, c):
-    [insert, into, name, optcolids, insert_spec] = l
-    from sqlsem import InsertOp
-    return InsertOp(name, optcolids, insert_spec)
-
-optcolids0 = returnNone
-optcolids1 = elt1
-colids1 = stat1
-colidsn = listcommathing
-
-def insert_values(l, c):
-    from sqlsem import InsertValues
-    return InsertValues(l[2])
-
-def insert_query(l, c):
-    from sqlsem import InsertSubSelect
-    return InsertSubSelect(l[0])
-
-litlist1 = stat1
-litlistn = listcommathing
-
-sliteral0 = elt0
-def sliteralp(l, c):
-    [p, v] = l
-    return +v
-
-def sliterald(l, c):
-    [l1, m, l2] = l
-    return l1 - l2
-
-def sliterals(l, c):
-    [l1, p, l2] = l
-    return l1 + l2
-
-def sliteralm(l, c):
-    [m, v] = l
-    return -v
-
-# select statement stuff
-
-def selectx(list, context):
-    [sub, optorder_by] = list
-    #sub.union_select = optunion
-    sub.order_by = optorder_by
-    # number of dynamic parameters in this parse.
-    sub.ndynamic = context.ndynamic()
-    return sub
-
-psubselect = elt1
-
-def subselect(list, context):
-    [select, alldistinct, selectlist, fromkw, trlist,
-     optwhere, optgroup, opthaving, optunion] = list
-    from sqlsem import Selector
-    sel = Selector(
-      alldistinct,
-      selectlist,
-      trlist,
-      optwhere,
-      optgroup,
-      opthaving,
-      # store # of dynamic parameters seen in this parse.
-      ndynamic = context.ndynamic()
-      )
-    sel.union_select = optunion
-    return sel
-
-def ad0(list, context):
-    return "ALL"
-
-adall = ad0
-
-def addistinct(list, context):
-    return "DISTINCT"
-
-def where0(list, context):
-    from sqlsem import BTPredicate
-    return BTPredicate() # true
-
-where1 = elt1
-
-group0 = returnNone
-
-group1 = elt2
-
-colnames1 = stat1
-
-colnamesn = listcommathing
-
-having0 = returnNone
-
-having1 = elt1
-
-union0 = returnNone
-
-def union1(l, c):
-    [union, alldistinct, selection] = l
-    from sqlsem import Union
-    return Union(alldistinct, selection)
-
-def except1(l, c):
-    [union, selection] = l
-    alldistinct = "DISTINCT"
-    from sqlsem import Except
-    return Except(alldistinct, selection)
-
-def intersect1(l, c):
-    [union, selection] = l
-    alldistinct = "DISTINCT"
-    from sqlsem import Intersect
-    return Intersect(alldistinct, selection)
-
-order0 = returnNone
-order1 = elt2
-#orderby = elt2
-sortspec1 = stat1
-sortspecn = listcommathing
-
-def sortint(l, c):
-    from sqlsem import PositionedSort
-    [num, ord] = l
-    from types import IntType
-    if type(num)!=IntType or num<=0:
-        raise ValueError, `num`+': col position not positive int'
-    return PositionedSort(num, ord)
-
-def sortcol(l, c):
-    from sqlsem import NamedSort
-    [name, ord] = l
-    return NamedSort(name, ord)
-
-def optord0(l, c):
-    return "ASC"
-
-optordasc = optord0
-
-def optorddesc(l, c):
-    return "DESC"
-
-## table reference list returns list of (name, name) or (name, alias)
-def trl1(l, c):
-    [name] = l
-    return [(name, name)]
-
-def trln(l,c):
-    [name, comma, others] = l
-    others.insert(0, (name, name))
-    return others
-
-def trl1a(l,c):
-    [name, alias] = l
-    return [(name, alias)]
-
-def trlna(l,c):
-    [name, alias, comma, others] = l
-    others.insert(0, (name, alias))
-    return others
-
-def trl1as(l,c):
-    [name, as, alias] = l
-    return [(name, alias)]
-
-def trlnas(l,c):
-    [name, as, alias, comma, others] = l
-    others.insert(0, (name, alias))
-    return others
-
-tablename1 = elt0
-columnid1 = elt0
-
-def columnname1(list, context):
-    [ci] = list
-    return columnname2([None, None, ci], context)
-
-def columnname2(list, context):
-    [table, ignore, col] = list
-    from sqlsem import BoundAttribute
-    return BoundAttribute(table, col)
-
-def dynamic(list, context):
-    from sqlsem import BoundAttribute
-    # return a new dynamic parameter
-    int = context.param()
-    return BoundAttribute(0, int)
-
-# expression stuff
-def literal(list, context):
-    [lit] = list
-    from sqlsem import Constant
-    return Constant(lit)
-
-def stringstring(l, c):
-    """two strings in sequence = apostrophe"""
-    [l1, l2] = l
-    from sqlsem import Constant
-    value = "%s'%s" % (l1.value0, l2)
-    return Constant(value)
-
-numlit = literal
-stringlit = literal
-primarylit = elt0
-primary1 = elt0
-factor1 = elt0
-term1 = elt0
-exp1 = elt0
-
-def expplus(list, context):
-    [exp, plus, term] = list
-    return exp + term
-
-def expminus(list, context):
-    [exp, minus, term] = list
-    return exp - term
-
-def termtimes(list, context):
-    [exp, times, term] = list
-    return exp * term
-
-def termdiv(list, context):
-    [exp, div, term] = list
-    return exp / term
-
-plusfactor = elt1
-
-def minusfactor(list, context):
-    [minus, factor] = list
-    return -factor
-
-primaryexp = elt1
-
-primaryset = elt0
-
-def countstar(l, c):
-    from sqlsem import Count
-    return Count("*")
-
-def distinctset(l, c):
-    [agg, p1, distinct, exp, p2] = l
-    return set(agg, exp, 1)
-
-distinctcount = distinctset
-
-def allset(l, c):
-    [agg, p1, exp, p2] = l
-    return set(agg, exp, 0)
-
-allcount = allset
-
-def set(agg, exp, distinct):
-    import sqlsem
-    if agg=="AVG":
-        return sqlsem.Average(exp, distinct)
-    if agg=="COUNT":
-        return sqlsem.Count(exp, distinct)
-    if agg=="MAX":
-        return sqlsem.Maximum(exp, distinct)
-    if agg=="MIN":
-        return sqlsem.Minimum(exp, distinct)
-    if agg=="SUM":
-        return sqlsem.Sum(exp, distinct)
-    if agg=="MEDIAN":
-        return sqlsem.Median(exp, distinct)
-    raise NameError, `agg`+": unknown aggregate"
-
-average = count = maximum = minimum = summation = median = elt0
-
-def predicateeq(list, context):
-    [e1, eq, e2] = list
-    return e1.equate(e2)
-
-def predicatene(list, context):
-    [e1, lt, gt, e2] = list
-    return ~(e1.equate(e2))
-
-def predicatelt(list, context):
-    [e1, lt, e2] = list
-    return e1.lt(e2)
-
-def predicategt(list, context):
-    [e1, lt, e2] = list
-    return e2.lt(e1)
-
-def predicatele(list, context):
-    [e1, lt, eq, e2] = list
-    return e1.le(e2)
-
-def predicatege(list, context):
-    [e1, lt, eq, e2] = list
-    return e2.le(e1)
-
-def predbetween(list, context):
-    [e1, between, e2, andkw, e3] = list
-    from sqlsem import BetweenPredicate
-    return BetweenPredicate(e1, e2, e3)
-
-def prednotbetween(list, context):
-    [e1, notkw, between, e2, andkw, e3] = list
-    from sqlsem import BetweenPredicate
-    return ~BetweenPredicate(e1, e2, e3)
-
-predicate1 = elt0
-bps = elt1
-bp1 = elt0
-
-# exists predicate stuff
-predexists = elt0
-def exists(l, c):
-    [ex, paren1, subquery, paren2] = l
-    from sqlsem import ExistsPred
-    return ExistsPred(subquery)
-
-def notbf(list, context):
-    [ notst, thing ] = list
-    return ~thing
-
-# quantified predicates
-nnall = elt0
-nnany = elt0
-
-def predqeq(list, context):
-    [exp, eq, allany, p1, subq, p2] = list
-    from sqlsem import QuantEQ, QuantNE
-    if allany=="ANY":
-        return QuantEQ(exp, subq)
-    else:
-        return ~QuantNE(exp, subq)
-
-def predqne(list, context):
-    [exp, lt, gt, allany, p1, subq, p2] = list
-    from sqlsem import QuantEQ, QuantNE
-    if allany=="ANY":
-        return QuantNE(exp, subq)
-    else:
-        return ~QuantEQ(exp, subq)
-
-def predqlt(list, context):
-    [exp, lt, allany, p1, subq, p2] = list
-    from sqlsem import QuantLT, QuantGE
-    if allany=="ANY":
-        return QuantLT(exp, subq)
-    else:
-        return ~QuantGE(exp, subq)
-
-def predqgt(list, context):
-    [exp, gt, allany, p1, subq, p2] = list
-    from sqlsem import QuantGT, QuantLE
-    if allany=="ANY":
-        return QuantGT(exp, subq)
-    else:
-        return ~QuantLE(exp, subq)
-
-def predqle(list, context):
-    [exp, less, eq, allany, p1, subq, p2] = list
-    from sqlsem import QuantGT, QuantLE
-    if allany=="ANY":
-        return QuantLE(exp, subq)
-    else:
-        return ~QuantGT(exp, subq)
-
-def predqge(list, context):
-    [exp, gt, eq, allany, p1, subq, p2] = list
-    from sqlsem import QuantGE, QuantLT
-    if allany=="ANY":
-        return QuantGE(exp, subq)
-    else:
-        return ~QuantLT(exp, subq)
-
-# subquery expression
-def subqexpr(list, context):
-    [p1, subq, p2] = list
-    from sqlsem import SubQueryExpression
-    return SubQueryExpression(subq)
-
-def predin(list, context):
-    [exp, inkw, p1, subq, p2] = list
-    from sqlsem import InPredicate
-    return InPredicate(exp, subq)
-
-def prednotin(list, context):
-    [exp, notkw, inkw, p1, subq, p2] = list
-    from sqlsem import InPredicate
-    return ~InPredicate(exp, subq)
-
-def predinlits(list, context):
-    [exp, inkw, p1, lits, p2] = list
-    from sqlsem import InLits
-    return InLits(exp, lits)
-
-def prednotinlits(list, context):
-    [exp, notkw, inkw, p1, lits, p2] = list
-    from sqlsem import InLits
-    return ~InLits(exp, lits)
-
-
-bf1 = elt0
-
-def booln(list, context):
-    [ e1, andst, e2 ] = list
-    return e1&e2
-
-bool1 = elt0
-
-def searchn(list, context):
-    [ e1, orst, e2 ] = list
-    return e1 | e2
-
-search1 = elt0
-
-colalias = elt0
-
-# select list stuff
-def selectstar(l,c):
-    return "*"
-
-selectsome = elt0
-select1 = elt0
-
-# selectsub returns (expression, asname)
-
-def select1(list, context):
-    [ (exp, name) ] = list
-    from sqlsem import TupleCollector
-    result = TupleCollector()
-    result.addbinding(name, exp)
-    return result
-
-def selectn(list, context):
-    [ selectsubs, comma, select_sublist ] = list
-    (exp, name) = select_sublist
-    selectsubs.addbinding(name, exp)
-    return selectsubs
-
-def selectit(list, context):
-    [exp] = list
-    return (exp, None) # no binding!
-
-def selectname(list, context):
-    [exp, as, alias] = list
-    return (exp, alias)
-
-colalias = elt0
-
-
-#### do the bindings.
-
-# note: all reduction function defs must precede this assign
-VARS = vars()
-
-class punter:
-    def __init__(self, name):
-        self.name = name
-    def __call__(self, list, context):
-        print "punt:", self.name, list
-        return list
-
-class tracer:
-    def __init__(self, name, fn):
-        self.name = name
-        self.fn = fn
-
-    def __call__(self, list, context):
-        print self.name, list
-        return self.fn(list, context)
-
-def BindRules(sqlg):
-    for name in sqlg.RuleNameToIndex.keys():
-        if VARS.has_key(name):
-            #print "binding", name
-            sqlg.Bind(name, VARS[name]) # nondebug
-            #sqlg.Bind(name, tracer(name, VARS[name]) ) # debug
-        else:
-            print "unbound", name
-            sqlg.Bind(name, punter(name))
-    return sqlg

Copied: zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py (from rev 91892, zope.rdb/trunk/src/zope/rdb/gadfly/sqlbind.py)
===================================================================
--- zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py	                        (rev 0)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/gadfly/sqlbind.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -0,0 +1,623 @@
+"""rule bindings for sql grammar."""
+
+def elt0(list, context):
+    """return first member of reduction"""
+    return list[0]
+
+def elt1(list, context):
+    """return second member"""
+    return list[1]
+
+def elt2(list, context):
+    return list[2]
+
+def returnNone(list, context):
+    return None
+
+def stat1(list, context):
+    """return list of len 1 of statements"""
+    return list
+
+#def statn(list, context):
+#    """return a list of statement reductions"""
+#    [stat, semi, statlist] = list
+#    statlist.insert(0, stat)
+#    return statlist
+
+def thingcommalist(l, c):
+    [thing, comma, list] = l
+    list.insert(0, thing)
+    return list
+
+def listcommathing(l, c):
+    [list, comma, thing] = l
+    list.append(thing)
+    return list
+
+statn = thingcommalist
+selstat = elt0
+insstat = elt0
+createtablestat = elt0
+droptablestat = elt0
+delstat = elt0
+updatestat = elt0
+createindexstat = elt0
+dropindexstat = elt0
+createviewstat = elt0
+dropviewstat = elt0
+
+# drop view statement stuff
+def dropview(l, c):
+    [drop, view, name] = l
+    from sqlsem import DropView
+    return DropView(name)
+
+# create view statement stuff
+def createview(l, c):
+    [create, view, name, namelist, as_, selection] = l
+    from sqlsem import CreateView
+    return CreateView(name, namelist, selection)
+
+optnamelist0 = returnNone
+optnamelistn = elt1
+
+# drop index statement stuff
+def dropindex(l, c):
+    [drop, index, name] = l
+    from sqlsem import DropIndex
+    return DropIndex(name)
+
+# create index statement stuff
+def createindex(l, c):
+    [create, index, name, on, table, op, namelist, cp] = l
+    from sqlsem import CreateIndex
+    return CreateIndex(name, table, namelist)
+
+def createuniqueindex(l, c):
+    [create, unique, index, name, on, table, op, namelist, cp] = l
+    from sqlsem import CreateIndex
+    return CreateIndex(name, table, namelist, unique=1)
+
+names1 = stat1
+namesn = listcommathing
+
+# update statement stuff
+
+def update(l, c):
+    [upd, name, set, assns, condition] = l
+    from sqlsem import UpdateOp
+    return UpdateOp(name, assns, condition)
+
+def assn(l, c):
+    [col, eq, exp] = l
+    return (col, exp)
+
+def assn1(l, c):
+    [ (col, exp) ] = l
+    from sqlsem import TupleCollector
+    result = TupleCollector()
+    result.addbinding(col, exp)
+    return result
+
+def assnn(l, c):
+    [ result, comma, (col, exp) ] = l
+    result.addbinding(col, exp)
+    return result
+
+# delete statement stuff
+
+def deletefrom(l, c):
+    [delete, fromkw, name, where] = l
+    from sqlsem import DeleteOp
+    return DeleteOp(name, where)
+
+# drop table stuff
+
+def droptable(l, c):
+    [drop, table, name] = l
+    from sqlsem import DropTable
+    return DropTable(name)
+
+# create table statement stuff
+
+def createtable(list, context):
+    [create, table, name, p1, colelts, p2] = list
+    from sqlsem import CreateTable
+    return CreateTable(name, colelts)
+
+colelts1 = stat1
+coleltsn = listcommathing
+#def coleltsn(list, c):
+#    [c1, cc, ce] = list
+#    c1.append(ce)
+#    return c1
+
+coleltid = elt0
+coleltconstraint = elt0
+
+def coldef(l, c):
+    [colid, datatype, default, constraints] = l
+    from sqlsem import ColumnDef
+    return ColumnDef(colid, datatype, default, constraints)
+
+optdef0 = returnNone
+optcolconstr0 = returnNone
+stringtype = exnumtype = appnumtype = integer = float = varchar = elt0
+varcharn = elt0
+
+# insert statement stuff
+
+def insert1(l, c):
+    [insert, into, name, optcolids, insert_spec] = l
+    from sqlsem import InsertOp
+    return InsertOp(name, optcolids, insert_spec)
+
+optcolids0 = returnNone
+optcolids1 = elt1
+colids1 = stat1
+colidsn = listcommathing
+
+def insert_values(l, c):
+    from sqlsem import InsertValues
+    return InsertValues(l[2])
+
+def insert_query(l, c):
+    from sqlsem import InsertSubSelect
+    return InsertSubSelect(l[0])
+
+litlist1 = stat1
+litlistn = listcommathing
+
+sliteral0 = elt0
+def sliteralp(l, c):
+    [p, v] = l
+    return +v
+
+def sliterald(l, c):
+    [l1, m, l2] = l
+    return l1 - l2
+
+def sliterals(l, c):
+    [l1, p, l2] = l
+    return l1 + l2
+
+def sliteralm(l, c):
+    [m, v] = l
+    return -v
+
+# select statement stuff
+
+def selectx(list, context):
+    [sub, optorder_by] = list
+    #sub.union_select = optunion
+    sub.order_by = optorder_by
+    # number of dynamic parameters in this parse.
+    sub.ndynamic = context.ndynamic()
+    return sub
+
+psubselect = elt1
+
+def subselect(list, context):
+    [select, alldistinct, selectlist, fromkw, trlist,
+     optwhere, optgroup, opthaving, optunion] = list
+    from sqlsem import Selector
+    sel = Selector(
+      alldistinct,
+      selectlist,
+      trlist,
+      optwhere,
+      optgroup,
+      opthaving,
+      # store # of dynamic parameters seen in this parse.
+      ndynamic = context.ndynamic()
+      )
+    sel.union_select = optunion
+    return sel
+
+def ad0(list, context):
+    return "ALL"
+
+adall = ad0
+
+def addistinct(list, context):
+    return "DISTINCT"
+
+def where0(list, context):
+    from sqlsem import BTPredicate
+    return BTPredicate() # true
+
+where1 = elt1
+
+group0 = returnNone
+
+group1 = elt2
+
+colnames1 = stat1
+
+colnamesn = listcommathing
+
+having0 = returnNone
+
+having1 = elt1
+
+union0 = returnNone
+
+def union1(l, c):
+    [union, alldistinct, selection] = l
+    from sqlsem import Union
+    return Union(alldistinct, selection)
+
+def except1(l, c):
+    [union, selection] = l
+    alldistinct = "DISTINCT"
+    from sqlsem import Except
+    return Except(alldistinct, selection)
+
+def intersect1(l, c):
+    [union, selection] = l
+    alldistinct = "DISTINCT"
+    from sqlsem import Intersect
+    return Intersect(alldistinct, selection)
+
+order0 = returnNone
+order1 = elt2
+#orderby = elt2
+sortspec1 = stat1
+sortspecn = listcommathing
+
+def sortint(l, c):
+    from sqlsem import PositionedSort
+    [num, ord] = l
+    from types import IntType
+    if type(num)!=IntType or num<=0:
+        raise ValueError, `num`+': col position not positive int'
+    return PositionedSort(num, ord)
+
+def sortcol(l, c):
+    from sqlsem import NamedSort
+    [name, ord] = l
+    return NamedSort(name, ord)
+
+def optord0(l, c):
+    return "ASC"
+
+optordasc = optord0
+
+def optorddesc(l, c):
+    return "DESC"
+
+## table reference list returns list of (name, name) or (name, alias)
+def trl1(l, c):
+    [name] = l
+    return [(name, name)]
+
+def trln(l,c):
+    [name, comma, others] = l
+    others.insert(0, (name, name))
+    return others
+
+def trl1a(l,c):
+    [name, alias] = l
+    return [(name, alias)]
+
+def trlna(l,c):
+    [name, alias, comma, others] = l
+    others.insert(0, (name, alias))
+    return others
+
+def trl1as(l,c):
+    [name, as_, alias] = l
+    return [(name, alias)]
+
+def trlnas(l,c):
+    [name, as_, alias, comma, others] = l
+    others.insert(0, (name, alias))
+    return others
+
+tablename1 = elt0
+columnid1 = elt0
+
+def columnname1(list, context):
+    [ci] = list
+    return columnname2([None, None, ci], context)
+
+def columnname2(list, context):
+    [table, ignore, col] = list
+    from sqlsem import BoundAttribute
+    return BoundAttribute(table, col)
+
+def dynamic(list, context):
+    from sqlsem import BoundAttribute
+    # return a new dynamic parameter
+    int = context.param()
+    return BoundAttribute(0, int)
+
+# expression stuff
+def literal(list, context):
+    [lit] = list
+    from sqlsem import Constant
+    return Constant(lit)
+
+def stringstring(l, c):
+    """two strings in sequence = apostrophe"""
+    [l1, l2] = l
+    from sqlsem import Constant
+    value = "%s'%s" % (l1.value0, l2)
+    return Constant(value)
+
+numlit = literal
+stringlit = literal
+primarylit = elt0
+primary1 = elt0
+factor1 = elt0
+term1 = elt0
+exp1 = elt0
+
+def expplus(list, context):
+    [exp, plus, term] = list
+    return exp + term
+
+def expminus(list, context):
+    [exp, minus, term] = list
+    return exp - term
+
+def termtimes(list, context):
+    [exp, times, term] = list
+    return exp * term
+
+def termdiv(list, context):
+    [exp, div, term] = list
+    return exp / term
+
+plusfactor = elt1
+
+def minusfactor(list, context):
+    [minus, factor] = list
+    return -factor
+
+primaryexp = elt1
+
+primaryset = elt0
+
+def countstar(l, c):
+    from sqlsem import Count
+    return Count("*")
+
+def distinctset(l, c):
+    [agg, p1, distinct, exp, p2] = l
+    return set(agg, exp, 1)
+
+distinctcount = distinctset
+
+def allset(l, c):
+    [agg, p1, exp, p2] = l
+    return set(agg, exp, 0)
+
+allcount = allset
+
+def set(agg, exp, distinct):
+    import sqlsem
+    if agg=="AVG":
+        return sqlsem.Average(exp, distinct)
+    if agg=="COUNT":
+        return sqlsem.Count(exp, distinct)
+    if agg=="MAX":
+        return sqlsem.Maximum(exp, distinct)
+    if agg=="MIN":
+        return sqlsem.Minimum(exp, distinct)
+    if agg=="SUM":
+        return sqlsem.Sum(exp, distinct)
+    if agg=="MEDIAN":
+        return sqlsem.Median(exp, distinct)
+    raise NameError, `agg`+": unknown aggregate"
+
+average = count = maximum = minimum = summation = median = elt0
+
+def predicateeq(list, context):
+    [e1, eq, e2] = list
+    return e1.equate(e2)
+
+def predicatene(list, context):
+    [e1, lt, gt, e2] = list
+    return ~(e1.equate(e2))
+
+def predicatelt(list, context):
+    [e1, lt, e2] = list
+    return e1.lt(e2)
+
+def predicategt(list, context):
+    [e1, lt, e2] = list
+    return e2.lt(e1)
+
+def predicatele(list, context):
+    [e1, lt, eq, e2] = list
+    return e1.le(e2)
+
+def predicatege(list, context):
+    [e1, lt, eq, e2] = list
+    return e2.le(e1)
+
+def predbetween(list, context):
+    [e1, between, e2, andkw, e3] = list
+    from sqlsem import BetweenPredicate
+    return BetweenPredicate(e1, e2, e3)
+
+def prednotbetween(list, context):
+    [e1, notkw, between, e2, andkw, e3] = list
+    from sqlsem import BetweenPredicate
+    return ~BetweenPredicate(e1, e2, e3)
+
+predicate1 = elt0
+bps = elt1
+bp1 = elt0
+
+# exists predicate stuff
+predexists = elt0
+def exists(l, c):
+    [ex, paren1, subquery, paren2] = l
+    from sqlsem import ExistsPred
+    return ExistsPred(subquery)
+
+def notbf(list, context):
+    [ notst, thing ] = list
+    return ~thing
+
+# quantified predicates
+nnall = elt0
+nnany = elt0
+
+def predqeq(list, context):
+    [exp, eq, allany, p1, subq, p2] = list
+    from sqlsem import QuantEQ, QuantNE
+    if allany=="ANY":
+        return QuantEQ(exp, subq)
+    else:
+        return ~QuantNE(exp, subq)
+
+def predqne(list, context):
+    [exp, lt, gt, allany, p1, subq, p2] = list
+    from sqlsem import QuantEQ, QuantNE
+    if allany=="ANY":
+        return QuantNE(exp, subq)
+    else:
+        return ~QuantEQ(exp, subq)
+
+def predqlt(list, context):
+    [exp, lt, allany, p1, subq, p2] = list
+    from sqlsem import QuantLT, QuantGE
+    if allany=="ANY":
+        return QuantLT(exp, subq)
+    else:
+        return ~QuantGE(exp, subq)
+
+def predqgt(list, context):
+    [exp, gt, allany, p1, subq, p2] = list
+    from sqlsem import QuantGT, QuantLE
+    if allany=="ANY":
+        return QuantGT(exp, subq)
+    else:
+        return ~QuantLE(exp, subq)
+
+def predqle(list, context):
+    [exp, less, eq, allany, p1, subq, p2] = list
+    from sqlsem import QuantGT, QuantLE
+    if allany=="ANY":
+        return QuantLE(exp, subq)
+    else:
+        return ~QuantGT(exp, subq)
+
+def predqge(list, context):
+    [exp, gt, eq, allany, p1, subq, p2] = list
+    from sqlsem import QuantGE, QuantLT
+    if allany=="ANY":
+        return QuantGE(exp, subq)
+    else:
+        return ~QuantLT(exp, subq)
+
+# subquery expression
+def subqexpr(list, context):
+    [p1, subq, p2] = list
+    from sqlsem import SubQueryExpression
+    return SubQueryExpression(subq)
+
+def predin(list, context):
+    [exp, inkw, p1, subq, p2] = list
+    from sqlsem import InPredicate
+    return InPredicate(exp, subq)
+
+def prednotin(list, context):
+    [exp, notkw, inkw, p1, subq, p2] = list
+    from sqlsem import InPredicate
+    return ~InPredicate(exp, subq)
+
+def predinlits(list, context):
+    [exp, inkw, p1, lits, p2] = list
+    from sqlsem import InLits
+    return InLits(exp, lits)
+
+def prednotinlits(list, context):
+    [exp, notkw, inkw, p1, lits, p2] = list
+    from sqlsem import InLits
+    return ~InLits(exp, lits)
+
+
+bf1 = elt0
+
+def booln(list, context):
+    [ e1, andst, e2 ] = list
+    return e1&e2
+
+bool1 = elt0
+
+def searchn(list, context):
+    [ e1, orst, e2 ] = list
+    return e1 | e2
+
+search1 = elt0
+
+colalias = elt0
+
+# select list stuff
+def selectstar(l,c):
+    return "*"
+
+selectsome = elt0
+select1 = elt0
+
+# selectsub returns (expression, asname)
+
+def select1(list, context):
+    [ (exp, name) ] = list
+    from sqlsem import TupleCollector
+    result = TupleCollector()
+    result.addbinding(name, exp)
+    return result
+
+def selectn(list, context):
+    [ selectsubs, comma, select_sublist ] = list
+    (exp, name) = select_sublist
+    selectsubs.addbinding(name, exp)
+    return selectsubs
+
+def selectit(list, context):
+    [exp] = list
+    return (exp, None) # no binding!
+
+def selectname(list, context):
+    [exp, as_, alias] = list
+    return (exp, alias)
+
+colalias = elt0
+
+
+#### do the bindings.
+
+# note: all reduction function defs must precede this assign
+VARS = vars()
+
+class punter:
+    def __init__(self, name):
+        self.name = name
+    def __call__(self, list, context):
+        print "punt:", self.name, list
+        return list
+
+class tracer:
+    def __init__(self, name, fn):
+        self.name = name
+        self.fn = fn
+
+    def __call__(self, list, context):
+        print self.name, list
+        return self.fn(list, context)
+
+def BindRules(sqlg):
+    for name in sqlg.RuleNameToIndex.keys():
+        if VARS.has_key(name):
+            #print "binding", name
+            sqlg.Bind(name, VARS[name]) # nondebug
+            #sqlg.Bind(name, tracer(name, VARS[name]) ) # debug
+        else:
+            print "unbound", name
+            sqlg.Bind(name, punter(name))
+    return sqlg

Modified: zope.rdb/tags/3.4.1/src/zope/rdb/gadflyzcml.py
===================================================================
--- zope.rdb/trunk/src/zope/rdb/gadflyzcml.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/gadflyzcml.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -13,7 +13,7 @@
 ##############################################################################
 """'gadflyRoot' Directive Handler
 
-$Id: metaconfigure.py 25177 2004-06-02 13:17:31Z jim $
+$Id$
 """
 from zope.configuration.fields import Path
 from zope.interface import Interface

Deleted: zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py
===================================================================
--- zope.rdb/trunk/src/zope/rdb/interfaces.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -1,335 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Relational Database Adapter interfaces.
-
-$Id$
-"""
-from zope.interface import Interface
-from zope.interface import Attribute
-from zope.schema import TextLine
-from zope.i18nmessageid import MessageFactory
-
-_ = MessageFactory('zope')
-
-
-class IDBITypeInfoProvider(Interface):
-    """This object can get the Type Info for a particular DBI
-    implementation."""
-
-    def getTypeInfo():
-        """Return an IDBITypeInfo object."""
-
-class IDBITypeInfo(Interface):
-    """Database adapter specific information"""
-
-    paramstyle = Attribute("""
-        String constant stating the type of parameter marker formatting
-        expected by the interface. Possible values are [2]:
-
-       'qmark' = Question mark style, e.g. '...WHERE name=?'
-       'numeric' = Numeric, positional style, e.g. '...WHERE name=:1'
-       'named' = Named style, e.g. '...WHERE name=:name'
-       'format' = ANSI C printf format codes, e.g. '...WHERE name=%s'
-       'pyformat' = Python extended format codes, e.g. '...WHERE name=%(name)s'
-       """)
-
-    threadsafety = Attribute("""
-        Integer constant stating the level of thread safety the interface
-        supports. Possible values are:
-
-            0 = Threads may not share the module.
-            1 = Threads may share the module, but not connections.
-            2 = Threads may share the module and connections.
-            3 = Threads may share the module, connections and cursors.
-
-        Sharing in the above context means that two threads may use a resource
-        without wrapping it using a mutex semaphore to implement resource
-        locking. Note that you cannot always make external resources thread
-        safe by managing access using a mutex: the resource may rely on global
-        variables or other external sources that are beyond your control.
-        """)
-
-    encoding = TextLine(
-        title=_("Database encoding"),
-        description=_("Encoding of the database content"),
-        default=u"utf-8",
-        required=False
-        )
-
-    def getEncoding():
-        """Get the database encoding."""
-
-    def setEncoding(encoding):
-        """Set the database encoding."""
-
-    def getConverter(type):
-        """Return a converter function for field type matching key"""
-
-class IResultSet(Interface):
-    """Holds results, and allows iteration."""
-
-    columns = Attribute("""A list of the column names of the returned result
-                           set.""")
-
-    def __getitem__(index):
-        """Return a brain row for index."""
-
-
-class DatabaseException(Exception):
-    """Generic Database Error"""
-
-    def __init__(self, message):
-        self.message = message
-
-    def __str__(self):
-        return self.message
-
-class DatabaseAdapterError(DatabaseException):
-    pass
-
-arraysize = 1 # default constant, symbolic
-
-class IDBICursor(Interface):
-    """DB API ICursor interface"""
-
-    description = Attribute("""This read-only attribute is a sequence of
-        7-item sequences. Each of these sequences contains information
-        describing one result column: (name, type_code, display_size,
-        internal_size, precision, scale, null_ok). This attribute will be None
-        for operations that do not return rows or if the cursor has not had an
-        operation invoked via the executeZZZ() method yet.
-
-        The type_code can be interpreted by comparing it to the Type Objects
-        specified in the section below. """)
-
-    arraysize = Attribute("""This read/write attribute specifies the number of
-        rows to fetch at a time with fetchmany(). It defaults to 1 meaning to
-        fetch a single row at a time.
-
-        Implementations must observe this value with respect to the
-        fetchmany() method, but are free to interact with the database a
-        single row at a time. It may also be used in the implementation of
-        executemany().
-        """)
-
-    def close():
-        """Close the cursor now (rather than whenever __del__ is called).  The
-        cursor will be unusable from this point forward; an Error (or
-        subclass) exception will be raised if any operation is attempted with
-        the cursor.
-        """
-
-    def execute(operation, parameters=None):
-        """Prepare and execute a database operation (query or
-        command). Parameters may be provided as sequence or mapping and will
-        be bound to variables in the operation. Variables are specified in a
-        database-specific notation (see the module's paramstyle attribute for
-        details). [5]
-
-        A reference to the operation will be retained by the cursor. If the
-        same operation object is passed in again, then the cursor can optimize
-        its behavior. This is most effective for algorithms where the same
-        operation is used, but different parameters are bound to it (many
-        times).
-
-        For maximum efficiency when reusing an operation, it is best to use
-        the setinputsizes() method to specify the parameter types and sizes
-        ahead of time. It is legal for a parameter to not match the predefined
-        information; the implementation should compensate, possibly with a
-        loss of efficiency.
-
-        The parameters may also be specified as list of tuples to e.g. insert
-        multiple rows in a single operation, but this kind of usage is
-        depreciated: executemany() should be used instead.
-
-        Return values are not defined.
-        """
-
-    def executemany(operation, seq_of_parameters):
-        """Prepare a database operation (query or command) and then execute it
-        against all parameter sequences or mappings found in the sequence
-        seq_of_parameters.
-
-        Modules are free to implement this method using multiple calls to the
-        execute() method or by using array operations to have the database
-        process the sequence as a whole in one call.
-
-        The same comments as for execute() also apply accordingly to this
-        method.
-
-        Return values are not defined.
-        """
-
-    def fetchone():
-        """Fetch the next row of a query result set, returning a single
-        sequence, or None when no more data is available. [6]
-
-        An Error (or subclass) exception is raised if the previous call to
-        executeZZZ() did not produce any result set or no call was issued yet.
-        """
-
-    def fetchmany(size=arraysize):
-        """Fetch the next set of rows of a query result, returning a sequence
-        of sequences (e.g. a list of tuples). An empty sequence is returned
-        when no more rows are available.
-
-        The number of rows to fetch per call is specified by the parameter. If
-        it is not given, the cursor's arraysize determines the number of rows
-        to be fetched. The method should try to fetch as many rows as
-        indicated by the size parameter. If this is not possible due to the
-        specified number of rows not being available, fewer rows may be
-        returned.
-
-        An Error (or subclass) exception is raised if the previous call to
-        executeZZZ() did not produce any result set or no call was issued yet.
-
-        Note there are performance considerations involved with the size
-        parameter. For optimal performance, it is usually best to use the
-        arraysize attribute. If the size parameter is used, then it is best
-        for it to retain the same value from one fetchmany() call to the next.
-        """
-
-    def fetchall():
-        """Fetch all (remaining) rows of a query result, returning them as a
-        sequence of sequences (e.g. a list of tuples). Note that the cursor's
-        arraysize attribute can affect the performance of this operation.
-
-        An Error (or subclass) exception is raised if the previous call to
-        executeZZZ() did not produce any result set or no call was issued yet.
-        """
-
-class IDBIConnection(Interface):
-    """A DB-API based Interface """
-
-    def cursor():
-        """Return a new IDBICursor Object using the connection.
-
-        If the database does not provide a direct cursor concept, the module
-        will have to emulate cursors using other means to the extent needed by
-        this specification.  """
-
-    def commit():
-        """Commit any pending transaction to the database. Note that if the
-        database supports an auto-commit feature, this must be initially off.
-        An interface method may be provided to turn it back on.
-
-        Database modules that do not support transactions should implement
-        this method with void functionality.
-        """
-
-    def rollback():
-        """In case a database does provide transactions this method causes the
-        database to roll back to the start of any pending transaction. Closing
-        a connection without committing the changes first will cause an
-        implicit rollback to be performed.  """
-
-    def close():
-        """Close the connection now (rather than whenever __del__ is
-        called). The connection will be unusable from this point forward; an
-        Error (or subclass) exception will be raised if any operation is
-        attempted with the connection. The same applies to all cursor objects
-        trying to use the connection.  """
-
-class ISQLCommand(Interface):
-    """Static SQL commands."""
-
-    connectionName = Attribute("""The name of the database connection
-    to use in getConnection """)
-
-    def getConnection():
-        """Get the database connection."""
-
-    def __call__():
-        """Execute an sql query and return a result object if appropriate"""
-
-class IZopeDatabaseAdapter(IDBITypeInfo):
-    """Interface for persistent object that returns
-    volatile IZopeConnections."""
-
-    def isConnected():
-        """Check whether the Zope Connection is actually connected to the
-        database."""
-
-    def __call__():
-        """Return an IZopeConnection object"""
-
-class IZopeDatabaseAdapterManagement(Interface):
-
-    def setDSN(dsn):
-        """Set the DSN for the Adapter instance"""
-
-    def getDSN():
-        """Get the DSN of the Adapter instance"""
-
-    dsn = TextLine(
-        title=_("DSN"),
-        description=_(
-        "Specify the DSN (Data Source Name) of the database. "
-        "Examples include:\n"
-        "\n"
-        "dbi://dbname\n"
-        "dbi://dbname;param1=value...\n"
-        "dbi://user:passwd/dbname\n"
-        "dbi://user:passwd/dbname;param1=value...\n"
-        "dbi://user:passwd@host:port/dbname\n"
-        "dbi://user:passwd@host:port/dbname;param1=value...\n"
-        "\n"
-        "All values should be properly URL-encoded."),
-        default=u"dbi://dbname",
-        required=True)
-
-    def connect():
-        """Connect to the specified database."""
-
-    def disconnect():
-        """Disconnect from the database."""
-
-class IManageableZopeDatabaseAdapter(IZopeDatabaseAdapter,
-                                     IZopeDatabaseAdapterManagement):
-    """Database adapters with management functions
-    """
-
-class IZopeConnection(IDBIConnection, IDBITypeInfoProvider):
-
-    # An implementation of this object will be exposed to the
-    # user. Therefore the Zope connection represents a connection in
-    # the Zope sense, meaning that the object might not be actually
-    # connected to a real relational database.
-
-    def cursor():
-        """Return an IZopeCursor object."""
-
-    def registerForTxn():
-        """Join the current transaction.
-
-        This method should only be inovoked by the Zope/DB transaction
-        manager.
-        """
-
-class IZopeCursor(IDBICursor):
-    """An IDBICursor that integrates with Zope's transactions"""
-
-    def execute(operation, parameters=None):
-        """Executes an operation, registering the underlying connection with
-        the transaction system.
-
-        See IDBICursor for more detailed execute information.
-        """
-
-    def executemany(operation, seq_of_parameters):
-        """Executes an operation, registering the underlying connection with
-        the transaction system.
-
-        See IDBICursor for more detailed executemany information.
-        """

Copied: zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py (from rev 91892, zope.rdb/trunk/src/zope/rdb/interfaces.py)
===================================================================
--- zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py	                        (rev 0)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/interfaces.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -0,0 +1,329 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Relational Database Adapter interfaces.
+
+$Id$
+"""
+from zope.interface import Interface
+from zope.interface import Attribute
+from zope.schema import TextLine
+from zope.i18nmessageid import MessageFactory
+
+_ = MessageFactory('zope')
+
+
+class IDBITypeInfoProvider(Interface):
+    """This object can get the Type Info for a particular DBI
+    implementation."""
+
+    def getTypeInfo():
+        """Return an IDBITypeInfo object."""
+
+class IDBITypeInfo(Interface):
+    """Database adapter specific information"""
+
+    paramstyle = Attribute("""
+        String constant stating the type of parameter marker formatting
+        expected by the interface. Possible values are [2]:
+
+       'qmark' = Question mark style, e.g. '...WHERE name=?'
+       'numeric' = Numeric, positional style, e.g. '...WHERE name=:1'
+       'named' = Named style, e.g. '...WHERE name=:name'
+       'format' = ANSI C printf format codes, e.g. '...WHERE name=%s'
+       'pyformat' = Python extended format codes, e.g. '...WHERE name=%(name)s'
+       """)
+
+    threadsafety = Attribute("""
+        Integer constant stating the level of thread safety the interface
+        supports. Possible values are:
+
+            0 = Threads may not share the module.
+            1 = Threads may share the module, but not connections.
+            2 = Threads may share the module and connections.
+            3 = Threads may share the module, connections and cursors.
+
+        Sharing in the above context means that two threads may use a resource
+        without wrapping it using a mutex semaphore to implement resource
+        locking. Note that you cannot always make external resources thread
+        safe by managing access using a mutex: the resource may rely on global
+        variables or other external sources that are beyond your control.
+        """)
+
+    encoding = TextLine(
+        title=_("Database encoding"),
+        description=_("Encoding of the database content"),
+        default=u"utf-8",
+        required=False
+        )
+
+    def getEncoding():
+        """Get the database encoding."""
+
+    def setEncoding(encoding):
+        """Set the database encoding."""
+
+    def getConverter(type):
+        """Return a converter function for field type matching key"""
+
+class IResultSet(Interface):
+    """Holds results, and allows iteration."""
+
+    columns = Attribute("""A list of the column names of the returned result
+                           set.""")
+
+    def __getitem__(index):
+        """Return a brain row for index."""
+
+
+class DatabaseException(Exception):
+    """Generic Database Error"""
+
+class DatabaseAdapterError(DatabaseException):
+    pass
+
+arraysize = 1 # default constant, symbolic
+
+class IDBICursor(Interface):
+    """DB API ICursor interface"""
+
+    description = Attribute("""This read-only attribute is a sequence of
+        7-item sequences. Each of these sequences contains information
+        describing one result column: (name, type_code, display_size,
+        internal_size, precision, scale, null_ok). This attribute will be None
+        for operations that do not return rows or if the cursor has not had an
+        operation invoked via the executeZZZ() method yet.
+
+        The type_code can be interpreted by comparing it to the Type Objects
+        specified in the section below. """)
+
+    arraysize = Attribute("""This read/write attribute specifies the number of
+        rows to fetch at a time with fetchmany(). It defaults to 1 meaning to
+        fetch a single row at a time.
+
+        Implementations must observe this value with respect to the
+        fetchmany() method, but are free to interact with the database a
+        single row at a time. It may also be used in the implementation of
+        executemany().
+        """)
+
+    def close():
+        """Close the cursor now (rather than whenever __del__ is called).  The
+        cursor will be unusable from this point forward; an Error (or
+        subclass) exception will be raised if any operation is attempted with
+        the cursor.
+        """
+
+    def execute(operation, parameters=None):
+        """Prepare and execute a database operation (query or
+        command). Parameters may be provided as sequence or mapping and will
+        be bound to variables in the operation. Variables are specified in a
+        database-specific notation (see the module's paramstyle attribute for
+        details). [5]
+
+        A reference to the operation will be retained by the cursor. If the
+        same operation object is passed in again, then the cursor can optimize
+        its behavior. This is most effective for algorithms where the same
+        operation is used, but different parameters are bound to it (many
+        times).
+
+        For maximum efficiency when reusing an operation, it is best to use
+        the setinputsizes() method to specify the parameter types and sizes
+        ahead of time. It is legal for a parameter to not match the predefined
+        information; the implementation should compensate, possibly with a
+        loss of efficiency.
+
+        The parameters may also be specified as list of tuples to e.g. insert
+        multiple rows in a single operation, but this kind of usage is
+        deprecated: executemany() should be used instead.
+
+        Return values are not defined.
+        """
+
+    def executemany(operation, seq_of_parameters):
+        """Prepare a database operation (query or command) and then execute it
+        against all parameter sequences or mappings found in the sequence
+        seq_of_parameters.
+
+        Modules are free to implement this method using multiple calls to the
+        execute() method or by using array operations to have the database
+        process the sequence as a whole in one call.
+
+        The same comments as for execute() also apply accordingly to this
+        method.
+
+        Return values are not defined.
+        """
+
+    def fetchone():
+        """Fetch the next row of a query result set, returning a single
+        sequence, or None when no more data is available. [6]
+
+        An Error (or subclass) exception is raised if the previous call to
+        executeZZZ() did not produce any result set or no call was issued yet.
+        """
+
+    def fetchmany(size=arraysize):
+        """Fetch the next set of rows of a query result, returning a sequence
+        of sequences (e.g. a list of tuples). An empty sequence is returned
+        when no more rows are available.
+
+        The number of rows to fetch per call is specified by the parameter. If
+        it is not given, the cursor's arraysize determines the number of rows
+        to be fetched. The method should try to fetch as many rows as
+        indicated by the size parameter. If this is not possible due to the
+        specified number of rows not being available, fewer rows may be
+        returned.
+
+        An Error (or subclass) exception is raised if the previous call to
+        executeZZZ() did not produce any result set or no call was issued yet.
+
+        Note there are performance considerations involved with the size
+        parameter. For optimal performance, it is usually best to use the
+        arraysize attribute. If the size parameter is used, then it is best
+        for it to retain the same value from one fetchmany() call to the next.
+        """
+
+    def fetchall():
+        """Fetch all (remaining) rows of a query result, returning them as a
+        sequence of sequences (e.g. a list of tuples). Note that the cursor's
+        arraysize attribute can affect the performance of this operation.
+
+        An Error (or subclass) exception is raised if the previous call to
+        executeZZZ() did not produce any result set or no call was issued yet.
+        """
+
+class IDBIConnection(Interface):
+    """A DB-API based Interface """
+
+    def cursor():
+        """Return a new IDBICursor Object using the connection.
+
+        If the database does not provide a direct cursor concept, the module
+        will have to emulate cursors using other means to the extent needed by
+        this specification.  """
+
+    def commit():
+        """Commit any pending transaction to the database. Note that if the
+        database supports an auto-commit feature, this must be initially off.
+        An interface method may be provided to turn it back on.
+
+        Database modules that do not support transactions should implement
+        this method with void functionality.
+        """
+
+    def rollback():
+        """In case a database does provide transactions this method causes the
+        database to roll back to the start of any pending transaction. Closing
+        a connection without committing the changes first will cause an
+        implicit rollback to be performed.  """
+
+    def close():
+        """Close the connection now (rather than whenever __del__ is
+        called). The connection will be unusable from this point forward; an
+        Error (or subclass) exception will be raised if any operation is
+        attempted with the connection. The same applies to all cursor objects
+        trying to use the connection.  """
+
+class ISQLCommand(Interface):
+    """Static SQL commands."""
+
+    connectionName = Attribute("""The name of the database connection
+    to use in getConnection """)
+
+    def getConnection():
+        """Get the database connection."""
+
+    def __call__():
+        """Execute an sql query and return a result object if appropriate"""
+
+class IZopeDatabaseAdapter(IDBITypeInfo):
+    """Interface for persistent object that returns
+    volatile IZopeConnections."""
+
+    def isConnected():
+        """Check whether the Zope Connection is actually connected to the
+        database."""
+
+    def __call__():
+        """Return an IZopeConnection object"""
+
+class IZopeDatabaseAdapterManagement(Interface):
+
+    def setDSN(dsn):
+        """Set the DSN for the Adapter instance"""
+
+    def getDSN():
+        """Get the DSN of the Adapter instance"""
+
+    dsn = TextLine(
+        title=_("DSN"),
+        description=_(
+        "Specify the DSN (Data Source Name) of the database. "
+        "Examples include:\n"
+        "\n"
+        "dbi://dbname\n"
+        "dbi://dbname;param1=value...\n"
+        "dbi://user:passwd/dbname\n"
+        "dbi://user:passwd/dbname;param1=value...\n"
+        "dbi://user:passwd@host:port/dbname\n"
+        "dbi://user:passwd@host:port/dbname;param1=value...\n"
+        "\n"
+        "All values should be properly URL-encoded."),
+        default=u"dbi://dbname",
+        required=True)
+
+    def connect():
+        """Connect to the specified database."""
+
+    def disconnect():
+        """Disconnect from the database."""
+
+class IManageableZopeDatabaseAdapter(IZopeDatabaseAdapter,
+                                     IZopeDatabaseAdapterManagement):
+    """Database adapters with management functions
+    """
+
+class IZopeConnection(IDBIConnection, IDBITypeInfoProvider):
+
+    # An implementation of this object will be exposed to the
+    # user. Therefore the Zope connection represents a connection in
+    # the Zope sense, meaning that the object might not be actually
+    # connected to a real relational database.
+
+    def cursor():
+        """Return an IZopeCursor object."""
+
+    def registerForTxn():
+        """Join the current transaction.
+
+        This method should only be invoked by the Zope/DB transaction
+        manager.
+        """
+
+class IZopeCursor(IDBICursor):
+    """An IDBICursor that integrates with Zope's transactions"""
+
+    def execute(operation, parameters=None):
+        """Executes an operation, registering the underlying connection with
+        the transaction system.
+
+        See IDBICursor for more detailed execute information.
+        """
+
+    def executemany(operation, seq_of_parameters):
+        """Executes an operation, registering the underlying connection with
+        the transaction system.
+
+        See IDBICursor for more detailed executemany information.
+        """

Modified: zope.rdb/tags/3.4.1/src/zope/rdb/tests/test_gadflyrootdirective.py
===================================================================
--- zope.rdb/trunk/src/zope/rdb/tests/test_gadflyrootdirective.py	2008-10-08 03:53:21 UTC (rev 91891)
+++ zope.rdb/tags/3.4.1/src/zope/rdb/tests/test_gadflyrootdirective.py	2008-10-10 16:53:36 UTC (rev 91998)
@@ -13,7 +13,7 @@
 ##############################################################################
 """Test 'rdb' ZCML Namespace Directives
 
-$Id: test_directives.py 25177 2004-06-02 13:17:31Z jim $
+$Id$
 """
 
 import os



More information about the Checkins mailing list