[Zope-Checkins] CVS: Zope/lib/python/third_party/docutils/docutils - __init__.py:1.1.4.1 core.py:1.1.4.1 examples.py:1.1.4.1 frontend.py:1.1.4.1 io.py:1.1.4.1 nodes.py:1.1.4.1 statemachine.py:1.1.4.1 urischemes.py:1.1.4.1 utils.py:1.1.4.1

Andreas Jung andreas at andreas-jung.com
Fri Oct 29 15:08:21 EDT 2004


Update of /cvs-repository/Zope/lib/python/third_party/docutils/docutils
In directory cvs.zope.org:/tmp/cvs-serv23727/lib/python/third_party/docutils/docutils

Added Files:
      Tag: Zope-2_7-branch
	__init__.py core.py examples.py frontend.py io.py nodes.py 
	statemachine.py urischemes.py utils.py 
Log Message:
moved docutils to lib/python/third_party


=== Added File Zope/lib/python/third_party/docutils/docutils/__init__.py ===
# Author: David Goodger
# Contact: goodger at python.org
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
This is the Docutils (Python Documentation Utilities) package.

Package Structure
=================

Modules:

- __init__.py: Contains component base classes, exception classes, and
  Docutils `__version__`.

- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
  functions.

- frontend.py: Runtime settings (command-line interface, configuration files)
  processing, for Docutils front-ends.

- io.py: Provides a uniform API for low-level input and output.

- nodes.py: Docutils document tree (doctree) node class library.

- statemachine.py: A finite state machine specialized for
  regular-expression-based text filters.

- urischemes.py: Contains a complete mapping of known URI addressing
  scheme names to descriptions.

- utils.py: Contains the ``Reporter`` system warning class and miscellaneous
  utilities.

Subpackages:

- languages: Language-specific mappings of terms.

- parsers: Syntax-specific input parser modules or packages.

- readers: Context-specific input handlers which understand the data
  source and manage a parser.

- transforms: Modules used by readers and writers to modify Docutils
  doctrees.

- writers: Format-specific output translators.
"""

__docformat__ = 'reStructuredText'

__version__ = '0.3.5'
"""``major.minor.micro`` version number.  The micro number is bumped for API
changes, for new functionality, and for interim project releases.  The minor
number is bumped whenever there is a significant project release.  The major
number will be bumped when the project is feature-complete, and perhaps if
there is a major change in the design."""


class ApplicationError(StandardError): pass
class DataError(ApplicationError): pass


class SettingsSpec:

    """
    Runtime setting specification base class.

    SettingsSpec subclass objects are used by
    `docutils.frontend.OptionParser`.
    """

    settings_spec = ()
    """Runtime settings specification.  Override in subclasses.

    Defines runtime settings and associated command-line options, as used by
    `docutils.frontend.OptionParser`.  This is a tuple of:

    - Option group title (string or `None` which implies no group, just a list
      of single options).
    
    - Description (string or `None`).
    
    - A sequence of option tuples.  Each consists of:

      - Help text (string)
      
      - List of option strings (e.g. ``['-Q', '--quux']``).
      
      - Dictionary of keyword arguments.  It contains arguments to the
        OptionParser/OptionGroup ``add_option`` method, possibly with the
        addition of a 'validator' keyword (see the
        `docutils.frontend.OptionParser.validators` instance attribute).  Runtime
        settings names are derived implicitly from long option names
        ('--a-setting' becomes ``settings.a_setting``) or explicitly from the
        'dest' keyword argument.  See optparse docs for more details.

    - More triples of group title, description, options, as many times as
      needed.  Thus, `settings_spec` tuples can be simply concatenated.
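
    For example, a minimal spec might look like this (a sketch; the option
    and setting names here are hypothetical, not real Docutils options)::

        settings_spec = (
            'My Component Options',
            None,
            (('Specify the widget size.',
              ['--widget-size'],
              {'default': '10', 'metavar': '<size>'}),))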
    """

    settings_defaults = None
    """A dictionary of defaults for settings not in `settings_spec` (internal
    settings, intended to be inaccessible by command-line and config file).
    Override in subclasses."""

    settings_default_overrides = None
    """A dictionary of auxiliary defaults, to override defaults for settings
    defined in other components.  Override in subclasses."""

    relative_path_settings = ()
    """Settings containing filesystem paths.  Override in subclasses.
    Settings listed here are to be interpreted relative to the current working
    directory."""

    config_section = None
    """The name of the config file section specific to this component
    (lowercase, no brackets).  Override in subclasses."""

    config_section_dependencies = None
    """A list of names of config file sections that are to be applied before
    `config_section`, in order (from general to specific).  In other words,
    the settings in `config_section` are to be overlaid on top of the settings
    from these sections.  The "general" section is assumed implicitly.
    Override in subclasses."""


class TransformSpec:

    """
    Runtime transform specification base class.

    TransformSpec subclass objects are used by
    `docutils.transforms.Transformer`.
    """

    default_transforms = ()
    """Transforms required by this class.  Override in subclasses."""
    
    unknown_reference_resolvers = ()
    """List of functions to try to resolve unknown references.  Unknown
    references have a 'refname' attribute which doesn't correspond to any
    target in the document.  Called when FinalCheckVisitor is unable to find a
    correct target.  The list should contain functions which will try to
    resolve unknown references, with the following signature::

        def reference_resolver(node):
            '''Returns boolean: true if resolved, false if not.'''

    If the function is able to resolve the reference, it should also remove
    the 'refname' attribute and mark the node as resolved::

        del node['refname']
        node.resolved = 1

    Each function must have a "priority" attribute which will affect the order
    the unknown_reference_resolvers are run::

        reference_resolver.priority = 100

    Override in subclasses."""


class Component(SettingsSpec, TransformSpec):

    """Base class for Docutils components."""

    component_type = None
    """Name of the component type ('reader', 'parser', 'writer').  Override in
    subclasses."""

    supported = ()
    """Names for this component.  Override in subclasses."""
    
    def supports(self, format):
        """
        Is `format` supported by this component?

        To be used by transforms to ask the dependent component if it supports
        a certain input context or output format.
        """
        return format in self.supported


=== Added File Zope/lib/python/third_party/docutils/docutils/core.py ===
# Authors: David Goodger
# Contact: goodger at python.org
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior.  For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`.  See `The Docutils Publisher`_.
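
For example, default behavior via a convenience function looks like this (a
sketch; the input string is arbitrary)::

    from docutils.core import publish_string
    html = publish_string(u'*Hello* world!', writer_name='html',
                          settings_overrides={'output_encoding': 'unicode'})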

.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""

__docformat__ = 'reStructuredText'

import sys
import pprint
from docutils import __version__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser


class Publisher:

    """
    A facade encapsulating the high-level logic of a Docutils system.
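
    For example, the convenience functions below drive a `Publisher` roughly
    like this (a sketch, mirroring `publish_cmdline`; the component names are
    the Docutils defaults)::

        pub = Publisher()
        pub.set_components('standalone', 'restructuredtext', 'pseudoxml')
        output = pub.publish()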
    """

    def __init__(self, reader=None, parser=None, writer=None,
                 source=None, source_class=io.FileInput,
                 destination=None, destination_class=io.FileOutput,
                 settings=None):
        """
        Initial setup.  If any of `reader`, `parser`, or `writer` are not
        specified, the corresponding ``set_...`` method should be called with
        a component name (`set_reader` sets the parser as well).
        """

        self.reader = reader
        """A `docutils.readers.Reader` instance."""

        self.parser = parser
        """A `docutils.parsers.Parser` instance."""

        self.writer = writer
        """A `docutils.writers.Writer` instance."""

        self.source = source
        """The source of input data, a `docutils.io.Input` instance."""

        self.source_class = source_class
        """The class for dynamically created source objects."""

        self.destination = destination
        """The destination for docutils output, a `docutils.io.Output`
        instance."""

        self.destination_class = destination_class
        """The class for dynamically created destination objects."""

        self.settings = settings
        """An object containing Docutils settings as instance attributes.
        Set by `self.process_command_line()` or `self.get_settings()`."""

    def set_reader(self, reader_name, parser, parser_name):
        """Set `self.reader` by name."""
        reader_class = readers.get_reader_class(reader_name)
        self.reader = reader_class(parser, parser_name)
        self.parser = self.reader.parser

    def set_writer(self, writer_name):
        """Set `self.writer` by name."""
        writer_class = writers.get_writer_class(writer_name)
        self.writer = writer_class()

    def set_components(self, reader_name, parser_name, writer_name):
        if self.reader is None:
            self.set_reader(reader_name, self.parser, parser_name)
        if self.parser is None:
            if self.reader.parser is None:
                self.reader.set_parser(parser_name)
            self.parser = self.reader.parser
        if self.writer is None:
            self.set_writer(writer_name)

    def setup_option_parser(self, usage=None, description=None,
                            settings_spec=None, config_section=None,
                            **defaults):
        if config_section:
            if not settings_spec:
                settings_spec = SettingsSpec()
            settings_spec.config_section = config_section
            parts = config_section.split()
            if len(parts) > 1 and parts[-1] == 'application':
                settings_spec.config_section_dependencies = ['applications']
        #@@@ Add self.source & self.destination to components in future?
        option_parser = OptionParser(
            components=(self.parser, self.reader, self.writer, settings_spec),
            defaults=defaults, read_config_files=1,
            usage=usage, description=description)
        return option_parser

    def get_settings(self, usage=None, description=None,
                     settings_spec=None, config_section=None, **defaults):
        """
        Set and return default settings (overrides in `defaults` dict).

        Set components first (`self.set_reader` & `self.set_writer`).
        Explicitly setting `self.settings` disables command line option
        processing from `self.publish()`.
        """
        option_parser = self.setup_option_parser(
            usage, description, settings_spec, config_section, **defaults)
        self.settings = option_parser.get_default_values()
        return self.settings

    def process_programmatic_settings(self, settings_spec,
                                      settings_overrides,
                                      config_section):
        if self.settings is None:
            defaults = (settings_overrides or {}).copy()
            # Propagate exceptions by default when used programmatically:
            defaults.setdefault('traceback', 1)
            self.get_settings(settings_spec=settings_spec,
                              config_section=config_section,
                              **defaults)

    def process_command_line(self, argv=None, usage=None, description=None,
                             settings_spec=None, config_section=None,
                             **defaults):
        """
        Pass an empty list to `argv` to avoid reading `sys.argv` (the
        default).

        Set components first (`self.set_reader` & `self.set_writer`).
        """
        option_parser = self.setup_option_parser(
            usage, description, settings_spec, config_section, **defaults)
        if argv is None:
            argv = sys.argv[1:]
        self.settings = option_parser.parse_args(argv)

    def set_io(self, source_path=None, destination_path=None):
        if self.source is None:
            self.set_source(source_path=source_path)
        if self.destination is None:
            self.set_destination(destination_path=destination_path)

    def set_source(self, source=None, source_path=None):
        if source_path is None:
            source_path = self.settings._source
        else:
            self.settings._source = source_path
        self.source = self.source_class(
            source=source, source_path=source_path,
            encoding=self.settings.input_encoding)

    def set_destination(self, destination=None, destination_path=None):
        if destination_path is None:
            destination_path = self.settings._destination
        else:
            self.settings._destination = destination_path
        self.destination = self.destination_class(
            destination=destination, destination_path=destination_path,
            encoding=self.settings.output_encoding,
            error_handler=self.settings.output_encoding_error_handler)

    def apply_transforms(self, document):
        document.transformer.populate_from_components(
            (self.source, self.reader, self.reader.parser, self.writer,
             self.destination))
        document.transformer.apply_transforms()

    def publish(self, argv=None, usage=None, description=None,
                settings_spec=None, settings_overrides=None,
                config_section=None, enable_exit_status=None):
        """
        Process command line options and arguments (if `self.settings` not
        already set), run `self.reader` and then `self.writer`.  Return
        `self.writer`'s output.
        """
        if self.settings is None:
            self.process_command_line(
                argv, usage, description, settings_spec, config_section,
                **(settings_overrides or {}))
        self.set_io()
        exit = None
        document = None
        try:
            document = self.reader.read(self.source, self.parser,
                                        self.settings)
            self.apply_transforms(document)
            output = self.writer.write(document, self.destination)
            self.writer.assemble_parts()
        except Exception, error:
            if self.settings.traceback: # propagate exceptions?
                raise
            self.report_Exception(error)
            exit = 1
        self.debugging_dumps(document)
        if (enable_exit_status and document
            and (document.reporter.max_level
                 >= self.settings.exit_status_level)):
            sys.exit(document.reporter.max_level + 10)
        elif exit:
            sys.exit(1)
        return output

    def debugging_dumps(self, document):
        if self.settings.dump_settings:
            print >>sys.stderr, '\n::: Runtime settings:'
            print >>sys.stderr, pprint.pformat(self.settings.__dict__)
        if self.settings.dump_internals and document:
            print >>sys.stderr, '\n::: Document internals:'
            print >>sys.stderr, pprint.pformat(document.__dict__)
        if self.settings.dump_transforms and document:
            print >>sys.stderr, '\n::: Transforms applied:'
            print >>sys.stderr, pprint.pformat(document.transformer.applied)
        if self.settings.dump_pseudo_xml and document:
            print >>sys.stderr, '\n::: Pseudo-XML:'
            print >>sys.stderr, document.pformat().encode(
                'raw_unicode_escape')

    def report_Exception(self, error):
        if isinstance(error, utils.SystemMessage):
            self.report_SystemMessage(error)
        elif isinstance(error, UnicodeError):
            self.report_UnicodeError(error)
        else:
            print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
            print >>sys.stderr, ("""\
Exiting due to error.  Use "--traceback" to diagnose.
Please report errors to <docutils-users at lists.sf.net>.
Include "--traceback" output, Docutils version (%s),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, sys.version.split()[0]))

    def report_SystemMessage(self, error):
        print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
                             % (error.level,
                                utils.Reporter.levels[error.level]))

    def report_UnicodeError(self, error):
        sys.stderr.write(
            '%s: %s\n'
            '\n'
            'The specified output encoding (%s) cannot\n'
            'handle all of the output.\n'
            'Try setting "--output-encoding-error-handler" to\n'
            '\n'
            '* "xmlcharrefreplace" (for HTML & XML output);\n'
            % (error.__class__.__name__, error,
               self.settings.output_encoding))
        try:
            data = error.object[error.start:error.end]
            sys.stderr.write(
                '  the output will contain "%s" and should be usable.\n'
                '* "backslashreplace" (for other output formats, Python 2.3+);\n'
                '  look for "%s" in the output.\n'
                % (data.encode('ascii', 'xmlcharrefreplace'),
                   data.encode('ascii', 'backslashreplace')))
        except AttributeError:
            sys.stderr.write('  the output should be usable as-is.\n')
        sys.stderr.write(
            '* "replace"; look for "?" in the output.\n'
            '\n'
            '"--output-encoding-error-handler" is currently set to "%s".\n'
            '\n'
            'Exiting due to error.  Use "--traceback" to diagnose.\n'
            'If the advice above doesn\'t eliminate the error,\n'
            'please report it to <docutils-users at lists.sf.net>.\n'
            'Include "--traceback" output, Docutils version (%s),\n'
            'Python version (%s), your OS type & version, and the\n'
            'command line used.\n'
            % (self.settings.output_encoding_error_handler,
               __version__, sys.version.split()[0]))

default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
                       '<destination> (default is stdout).')

def publish_cmdline(reader=None, reader_name='standalone',
                    parser=None, parser_name='restructuredtext',
                    writer=None, writer_name='pseudoxml',
                    settings=None, settings_spec=None,
                    settings_overrides=None, config_section=None,
                    enable_exit_status=1, argv=None,
                    usage=default_usage, description=default_description):
    """
    Set up & run a `Publisher` for command-line-based file I/O (input and
    output file paths taken automatically from the command line).  Also
    return the encoded string output.

    Parameters: see `publish_programmatically` for the remainder.

    - `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
    - `usage`: Usage string, output if there's a problem parsing the command
      line.
    - `description`: Program description, output for the "--help" option
      (along with command-line option descriptions).
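
    For example, a minimal command-line front end might consist of (a
    sketch)::

        from docutils.core import publish_cmdline
        publish_cmdline(writer_name='html',
                        description='Generates HTML from reStructuredText.')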
    """
    pub = Publisher(reader, parser, writer, settings=settings)
    pub.set_components(reader_name, parser_name, writer_name)
    output = pub.publish(
        argv, usage, description, settings_spec, settings_overrides,
        config_section=config_section, enable_exit_status=enable_exit_status)
    return output

def publish_file(source=None, source_path=None,
                 destination=None, destination_path=None,
                 reader=None, reader_name='standalone',
                 parser=None, parser_name='restructuredtext',
                 writer=None, writer_name='pseudoxml',
                 settings=None, settings_spec=None, settings_overrides=None,
                 config_section=None, enable_exit_status=None):
    """
    Set up & run a `Publisher` for programmatic use with file-like I/O.
    Also return the encoded string output.

    Parameters: see `publish_programmatically`.
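
    For example (a sketch; the file names are hypothetical)::

        from docutils.core import publish_file
        publish_file(source_path='input.txt', destination_path='output.html',
                     writer_name='html')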
    """
    output, pub = publish_programmatically(
        source_class=io.FileInput, source=source, source_path=source_path,
        destination_class=io.FileOutput,
        destination=destination, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return output

def publish_string(source, source_path=None, destination_path=None,
                   reader=None, reader_name='standalone',
                   parser=None, parser_name='restructuredtext',
                   writer=None, writer_name='pseudoxml',
                   settings=None, settings_spec=None,
                   settings_overrides=None, config_section=None,
                   enable_exit_status=None):
    """
    Set up & run a `Publisher` for programmatic use with string I/O.  Return
    the encoded string or Unicode string output.

    For encoded string output, be sure to set the 'output_encoding' setting to
    the desired encoding.  Set it to 'unicode' for unencoded Unicode string
    output.  Here's one way::

        publish_string(..., settings_overrides={'output_encoding': 'unicode'})

    Similarly for Unicode string input (`source`)::

        publish_string(..., settings_overrides={'input_encoding': 'unicode'})

    Parameters: see `publish_programmatically`.
    """
    output, pub = publish_programmatically(
        source_class=io.StringInput, source=source, source_path=source_path,
        destination_class=io.StringOutput,
        destination=None, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return output

def publish_parts(source, source_path=None, destination_path=None,
                  reader=None, reader_name='standalone',
                  parser=None, parser_name='restructuredtext',
                  writer=None, writer_name='pseudoxml',
                  settings=None, settings_spec=None,
                  settings_overrides=None, config_section=None,
                  enable_exit_status=None):
    """
    Set up & run a `Publisher`, and return a dictionary of document parts.
    Dictionary keys are the names of parts, and values are Unicode strings;
    encoding is up to the client.  For programmatic use with string I/O.

    For encoded string input, be sure to set the 'input_encoding' setting to
    the encoding of the input.  Set it to 'unicode' for unencoded Unicode
    string input.  Here's how::

        publish_parts(..., settings_overrides={'input_encoding': 'unicode'})

    Parameters: see `publish_programmatically`.
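
    For example (a sketch; the available part names depend on the writer;
    the HTML writer's 'fragment' part is the one used by
    `docutils.examples.html_fragment`)::

        from docutils.core import publish_parts
        parts = publish_parts(u'*Hello* world!', writer_name='html')
        body = parts['fragment']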
    """
    output, pub = publish_programmatically(
        source_class=io.StringInput, source=source, source_path=source_path,
        destination_class=io.StringOutput,
        destination=None, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return pub.writer.parts

def publish_programmatically(source_class, source, source_path,
                            destination_class, destination, destination_path,
                            reader, reader_name,
                            parser, parser_name,
                            writer, writer_name,
                            settings, settings_spec,
                            settings_overrides, config_section,
                            enable_exit_status):
    """
    Set up & run a `Publisher` for custom programmatic use.  Return the
    encoded string output and the Publisher object.

    Applications should not need to call this function directly.  If you do
    find it necessary to call it directly, please write to the
    docutils-develop at lists.sourceforge.net mailing list.

    Parameters:

    * `source_class` **required**: The class for dynamically created source
      objects.  Typically `io.FileInput` or `io.StringInput`.

    * `source`: Type depends on `source_class`:

      - `io.FileInput`: Either a file-like object (must have 'read' and
        'close' methods), or ``None`` (`source_path` is opened).  If neither
        `source` nor `source_path` are supplied, `sys.stdin` is used.

      - `io.StringInput` **required**: The input string, either an encoded
        8-bit string (set the 'input_encoding' setting to the correct
        encoding) or a Unicode string (set the 'input_encoding' setting to
        'unicode').

    * `source_path`: Type depends on `source_class`:

      - `io.FileInput`: Path to the input file, opened if no `source`
        supplied.

      - `io.StringInput`: Optional.  Path to the file or object that produced
        `source`.  Only used for diagnostic output.

    * `destination_class` **required**: The class for dynamically created
      destination objects.  Typically `io.FileOutput` or `io.StringOutput`.

    * `destination`: Type depends on `destination_class`:

      - `io.FileOutput`: Either a file-like object (must have 'write' and
        'close' methods), or ``None`` (`destination_path` is opened).  If
        neither `destination` nor `destination_path` are supplied,
        `sys.stdout` is used.

      - `io.StringOutput`: Not used; pass ``None``.

    * `destination_path`: Type depends on `destination_class`:

      - `io.FileOutput`: Path to the output file.  Opened if no `destination`
        supplied.

      - `io.StringOutput`: Path to the file or object which will receive the
        output; optional.  Used for determining relative paths (stylesheets,
        source links, etc.).

    * `reader`: A `docutils.readers.Reader` object.

    * `reader_name`: Name or alias of the Reader class to be instantiated if
      no `reader` supplied.

    * `parser`: A `docutils.parsers.Parser` object.

    * `parser_name`: Name or alias of the Parser class to be instantiated if
      no `parser` supplied.

    * `writer`: A `docutils.writers.Writer` object.

    * `writer_name`: Name or alias of the Writer class to be instantiated if
      no `writer` supplied.

    * `settings`: A runtime settings (`docutils.frontend.Values`) object, for
      dotted-attribute access to runtime settings.  It's the end result of the
      `SettingsSpec`, config file, and option processing.  If `settings` is
      passed, it's assumed to be complete and no further setting/config/option
      processing is done.

    * `settings_spec`: A `docutils.SettingsSpec` subclass or object.  Provides
      extra application-specific settings definitions independently of
      components.  In other words, the application becomes a component, and
      its settings data is processed along with that of the other components.
      Used only if no `settings` specified.

    * `settings_overrides`: A dictionary containing application-specific
      settings defaults that override the defaults of other components.
      Used only if no `settings` specified.

    * `config_section`: A string, the name of the configuration file section
      for this application.  Overrides the ``config_section`` attribute
      defined by `settings_spec`.  Used only if no `settings` specified.

    * `enable_exit_status`: Boolean; enable exit status at end of processing?
    """
    pub = Publisher(reader, parser, writer, settings=settings,
                    source_class=source_class,
                    destination_class=destination_class)
    pub.set_components(reader_name, parser_name, writer_name)
    pub.process_programmatic_settings(
        settings_spec, settings_overrides, config_section)
    pub.set_source(source, source_path)
    pub.set_destination(destination, destination_path)
    output = pub.publish(enable_exit_status=enable_exit_status)
    return output, pub


=== Added File Zope/lib/python/third_party/docutils/docutils/examples.py ===
# Authors: David Goodger
# Contact: goodger at python.org
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
This module contains practical examples of Docutils client code.

Importing this module is not recommended; its contents are subject to change
in future Docutils releases.  Instead, it is recommended that you copy and
paste the parts you need into your own code, modifying as necessary.
"""

from docutils import core


def html_parts(input_string, source_path=None, destination_path=None,
               input_encoding='unicode', doctitle=1, initial_header_level=1):
    """
    Given an input string, returns a dictionary of HTML document parts.

    Dictionary keys are the names of parts, and values are Unicode strings;
    encoding is up to the client.

    Parameters:

    - `input_string`: A multi-line text string; required.
    - `source_path`: Path to the source file or object.  Optional, but useful
      for diagnostic output (system messages).
    - `destination_path`: Path to the file or object which will receive the
      output; optional.  Used for determining relative paths (stylesheets,
      source links, etc.).
    - `input_encoding`: The encoding of `input_string`.  If it is an encoded
      8-bit string, provide the correct encoding.  If it is a Unicode string,
      use "unicode", the default.
    - `doctitle`: Enable or disable the promotion of a lone top-level section
      title to document title (and of a subsequent section title to document
      subtitle); promotion is enabled by default.
    - `initial_header_level`: The initial level for header elements (e.g. 1
      for "<h1>").
    """
    overrides = {'input_encoding': input_encoding,
                 'doctitle_xform': doctitle,
                 'initial_header_level': initial_header_level}
    parts = core.publish_parts(
        source=input_string, source_path=source_path,
        destination_path=destination_path,
        writer_name='html', settings_overrides=overrides)
    return parts

def html_fragment(input_string, source_path=None, destination_path=None,
                  input_encoding='unicode', output_encoding='unicode',
                  doctitle=1, initial_header_level=1):
    """
    Given an input string, returns an HTML fragment as a string.

    The return value is the contents of the <body> tag, less the title,
    subtitle, and docinfo.

    Parameters (see `html_parts()` for the remainder):

    - `output_encoding`: The desired encoding of the output.  If a Unicode
      string is desired, use the default value of "unicode".
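
    A sketch of typical use (Python 2 ``print`` statement)::

        print html_fragment(u'*Hello* world!', output_encoding='utf-8')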
    """
    parts = html_parts(
        input_string=input_string, source_path=source_path,
        destination_path=destination_path,
        input_encoding=input_encoding, doctitle=doctitle,
        initial_header_level=initial_header_level)
    fragment = parts['fragment']
    if output_encoding != 'unicode':
        fragment = fragment.encode(output_encoding)
    return fragment


=== Added File Zope/lib/python/third_party/docutils/docutils/frontend.py ===
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
Command-line and common processing for Docutils front-end tools.

Exports the following classes:

* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
  (``object.attribute``).  Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.

Also exports the following functions:

* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators (see `OptionParser.validators`): `validate_encoding`,
  `validate_encoding_error_handler`, `validate_encoding_and_error_handler`,
  `validate_boolean`, `validate_threshold`,
  `validate_colon_separated_string_list`, `validate_url_trailing_slash`.
* `make_paths_absolute`.
"""

__docformat__ = 'reStructuredText'

import os
import os.path
import sys
import types
import copy
import warnings
import ConfigParser as CP
import codecs
import docutils
import optparse
from optparse import Values, SUPPRESS_HELP


def store_multiple(option, opt, value, parser, *args, **kwargs):
    """
    Store multiple values in `parser.values`.  (Option callback.)

    Store `None` for each attribute named in `args`, and store the value for
    each key (attribute name) in `kwargs`.
    """
    for attribute in args:
        setattr(parser.values, attribute, None)
    for key, value in kwargs.items():
        setattr(parser.values, key, value)

def read_config_file(option, opt, value, parser):
    """
    Read a configuration file during option processing.  (Option callback.)
    """
    try:
        new_settings = parser.get_config_file_settings(value)
    except ValueError, error:
        parser.error(error)
    parser.values.update(new_settings, parser)

def validate_encoding(setting, value, option_parser,
                      config_parser=None, config_section=None):
    try:
        codecs.lookup(value)
    except LookupError:
        raise (LookupError('setting "%s": unknown encoding: "%s"'
                           % (setting, value)),
               None, sys.exc_info()[2])
    return value

def validate_encoding_error_handler(setting, value, option_parser,
                                    config_parser=None, config_section=None):
    try:
        codecs.lookup_error(value)
    except AttributeError:              # prior to Python 2.3
        if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
            raise (LookupError(
                'unknown encoding error handler: "%s" (choices: '
                '"strict", "ignore", "replace", or "xmlcharrefreplace")' % value),
                   None, sys.exc_info()[2])
    except LookupError:
        raise (LookupError(
            'unknown encoding error handler: "%s" (choices: '
            '"strict", "ignore", "replace", "backslashreplace", '
            '"xmlcharrefreplace", and possibly others; see documentation for '
            'the Python ``codecs`` module)' % value),
               None, sys.exc_info()[2])
    return value

def validate_encoding_and_error_handler(
    setting, value, option_parser, config_parser=None, config_section=None):
    """
    Side effect: if an error handler is included in the value (after a
    colon), it is stored as if it were a separate setting/option.
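
    For example (a sketch)::

        encoding = validate_encoding_and_error_handler(
            'output_encoding', 'utf-8:replace', option_parser)
        # encoding == 'utf-8'; 'replace' is stored as
        # option_parser.values.output_encoding_error_handler (or in the
        # config section, if one was given).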
    """
    if ':' in value:
        encoding, handler = value.split(':')
        validate_encoding_error_handler(
            setting + '_error_handler', handler, option_parser,
            config_parser, config_section)
        if config_parser:
            config_parser.set(config_section, setting + '_error_handler',
                              handler)
        else:
            setattr(option_parser.values, setting + '_error_handler', handler)
    else:
        encoding = value
    validate_encoding(setting, encoding, option_parser,
                      config_parser, config_section)
    return encoding

def validate_boolean(setting, value, option_parser,
                     config_parser=None, config_section=None):
    if isinstance(value, types.StringType):
        try:
            return option_parser.booleans[value.strip().lower()]
        except KeyError:
            raise (LookupError('unknown boolean value: "%s"' % value),
                   None, sys.exc_info()[2])
    return value

def validate_threshold(setting, value, option_parser,
                       config_parser=None, config_section=None):
    try:
        return int(value)
    except ValueError:
        try:
            return option_parser.thresholds[value.lower()]
        except (KeyError, AttributeError):
            raise (LookupError('unknown threshold: %r.' % value),
                   None, sys.exc_info()[2])

def validate_colon_separated_string_list(
    setting, value, option_parser, config_parser=None, config_section=None):
    if isinstance(value, types.StringType):
        value = value.split(':')
    else:
        last = value.pop()
        value.extend(last.split(':'))
    return value

def validate_url_trailing_slash(
    setting, value, option_parser, config_parser=None, config_section=None):
    if not value:
        return './'
    elif value.endswith('/'):
        return value
    else:
        return value + '/'

def make_paths_absolute(pathdict, keys, base_path=None):
    """
    Interpret filesystem path settings relative to the `base_path` given.

    Paths are values in `pathdict` whose keys are in `keys`.  Get `keys` from
    `OptionParser.relative_path_settings`.
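
    For example (a sketch, assuming a POSIX filesystem)::

        settings = {'warning_stream': 'warnings.log'}
        make_paths_absolute(settings, ('warning_stream',), '/projects/docs')
        # settings['warning_stream'] == '/projects/docs/warnings.log'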
    """
    if base_path is None:
        base_path = os.getcwd()
    for key in keys:
        if pathdict.has_key(key):
            value = pathdict[key]
            if isinstance(value, types.ListType):
                value = [make_one_path_absolute(base_path, path)
                         for path in value]
            elif value:
                value = make_one_path_absolute(base_path, value)
            pathdict[key] = value

def make_one_path_absolute(base_path, path):
    return os.path.abspath(os.path.join(base_path, path))


class Values(optparse.Values):

    """
    Updates list attributes by extension rather than by replacement.
    Works in conjunction with the `OptionParser.lists` instance attribute.
    """

    def update(self, other_dict, option_parser):
        if isinstance(other_dict, Values):
            other_dict = other_dict.__dict__
        other_dict = other_dict.copy()
        for setting in option_parser.lists.keys():
            if (hasattr(self, setting) and other_dict.has_key(setting)):
                value = getattr(self, setting)
                if value:
                    value += other_dict[setting]
                    del other_dict[setting]
        self._update_loose(other_dict)


class Option(optparse.Option):

    def process(self, opt, value, values, parser):
        """
        Call the validator function on applicable settings.
        Extends `optparse.Option.process`.
        """
        result = optparse.Option.process(self, opt, value, values, parser)
        setting = self.dest
        if setting:
            value = getattr(values, setting)
            validator = parser.validators.get(setting)
            if validator:
                try:
                    new_value = validator(setting, value, parser)
                except Exception, error:
                    raise (optparse.OptionValueError(
                        'Error in option "%s":\n    %s: %s'
                        % (opt, error.__class__.__name__, error)),
                           None, sys.exc_info()[2])
                setattr(values, setting, new_value)
        return result


class OptionParser(optparse.OptionParser, docutils.SettingsSpec):

    """
    Parser for command-line and library use.  The `settings_spec`
    specifications here and in other Docutils components are merged to build
    the set of command-line options and runtime settings for this process.

    Common settings (defined below) and component-specific settings must not
    conflict.  Short options are reserved for common settings, and components
    are restricted to using long options.
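
    A sketch of library use (mirroring `docutils.core.Publisher`; the
    component objects are assumed to already exist)::

        option_parser = OptionParser(
            components=(parser, reader, writer), read_config_files=1,
            usage='%prog [options] [<source> [<destination>]]')
        settings = option_parser.parse_args(sys.argv[1:])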
    """

    standard_config_files = [
        '/etc/docutils.conf',           # system-wide
        './docutils.conf',              # project-specific
        '~/.docutils']                  # user-specific
    """Docutils configuration files, using ConfigParser syntax.  Filenames
    will be tilde-expanded later.  Later files override earlier ones."""

    threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
    """Possible inputs for for --report and --halt threshold values."""

    thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
    """Lookup table for --report and --halt threshold values."""

    booleans={'1': 1, 'on': 1, 'yes': 1, 'true': 1,
              '0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
    """Lookup table for boolean configuration file settings."""

    if hasattr(codecs, 'backslashreplace_errors'):
        default_error_encoding_error_handler = 'backslashreplace'
    else:
        default_error_encoding_error_handler = 'replace'

    settings_spec = (
        'General Docutils Options',
        None,
        (('Include a "Generated by Docutils" credit and link at the end '
          'of the document.',
          ['--generator', '-g'], {'action': 'store_true',
                                  'validator': validate_boolean}),
         ('Do not include a generator credit.',
          ['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
         ('Include the date at the end of the document (UTC).',
          ['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
                             'dest': 'datestamp'}),
         ('Include the time & date at the end of the document (UTC).',
          ['--time', '-t'], {'action': 'store_const',
                             'const': '%Y-%m-%d %H:%M UTC',
                             'dest': 'datestamp'}),
         ('Do not include a datestamp of any kind.',
          ['--no-datestamp'], {'action': 'store_const', 'const': None,
                               'dest': 'datestamp'}),
         ('Include a "View document source" link (relative to destination).',
          ['--source-link', '-s'], {'action': 'store_true',
                                    'validator': validate_boolean}),
         ('Use the supplied <URL> verbatim for a "View document source" '
          'link; implies --source-link.',
          ['--source-url'], {'metavar': '<URL>'}),
         ('Do not include a "View document source" link.',
          ['--no-source-link'],
          {'action': 'callback', 'callback': store_multiple,
           'callback_args': ('source_link', 'source_url')}),
         ('Enable backlinks from section headers to table of contents '
          'entries.  This is the default.',
          ['--toc-entry-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
           'default': 'entry'}),
         ('Enable backlinks from section headers to the top of the table of '
          'contents.',
          ['--toc-top-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
         ('Disable backlinks to the table of contents.',
          ['--no-toc-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_false'}),
         ('Enable backlinks from footnotes and citations to their '
          'references.  This is the default.',
          ['--footnote-backlinks'],
          {'action': 'store_true', 'default': 1,
           'validator': validate_boolean}),
         ('Disable backlinks from footnotes and citations.',
          ['--no-footnote-backlinks'],
          {'dest': 'footnote_backlinks', 'action': 'store_false'}),
         ('Disable Docutils section numbering',
          ['--no-section-numbering'],
          {'action': 'store_false', 'dest': 'sectnum_xform',
           'default': 1, 'validator': validate_boolean}),
         ('Set verbosity threshold; report system messages at or higher than '
          '<level> (by name or number: "info" or "1", warning/2, error/3, '
          'severe/4; also, "none" or "5").  Default is 2 (warning).',
          ['--report', '-r'], {'choices': threshold_choices, 'default': 2,
                               'dest': 'report_level', 'metavar': '<level>',
                               'validator': validate_threshold}),
         ('Report all system messages, info-level and higher.  (Same as '
          '"--report=info".)',
          ['--verbose', '-v'], {'action': 'store_const', 'const': 'info',
                                'dest': 'report_level'}),
         ('Do not report any system messages.  (Same as "--report=none".)',
          ['--quiet', '-q'], {'action': 'store_const', 'const': 'none',
                              'dest': 'report_level'}),
         ('Set the threshold (<level>) at or above which system messages are '
          'converted to exceptions, halting execution immediately by '
          'exiting (or propagating the exception if --traceback set).  '
          'Levels as in --report.  Default is 4 (severe).',
          ['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
                       'default': 4, 'metavar': '<level>',
                       'validator': validate_threshold}),
         ('Same as "--halt=info": halt processing at the slightest problem.',
          ['--strict'], {'action': 'store_const', 'const': 'info',
                         'dest': 'halt_level'}),
         ('Enable a non-zero exit status for normal exit if non-halting '
          'system messages (at or above <level>) were generated.  Levels as '
          'in --report.  Default is 5 (disabled).  Exit status is the maximum '
          'system message level plus 10 (11 for INFO, etc.).',
          ['--exit-status'], {'choices': threshold_choices,
                              'dest': 'exit_status_level',
                              'default': 5, 'metavar': '<level>',
                              'validator': validate_threshold}),
         ('Report debug-level system messages and generate diagnostic output.',
          ['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
         ('Do not report debug-level system messages or generate diagnostic '
          'output.',
          ['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
         ('Send the output of system messages (warnings) to <file>.',
          ['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
         ('Enable Python tracebacks when halt-level system messages and '
          'other exceptions occur.  Useful for debugging, and essential for '
          'issue reports.',
          ['--traceback'], {'action': 'store_true', 'default': None,
                            'validator': validate_boolean}),
         ('Disable Python tracebacks when errors occur; report just the error '
          'instead.  This is the default.',
          ['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
         ('Specify the encoding of input text.  Default is locale-dependent.',
          ['--input-encoding', '-i'],
          {'metavar': '<name>', 'validator': validate_encoding}),
         ('Specify the text encoding for output.  Default is UTF-8.  '
          'Optionally also specify the error handler for unencodable '
          'characters, after a colon (":"); default is "strict".  (See '
          '"--output-encoding-error-encoding".)',
          ['--output-encoding', '-o'],
          {'metavar': '<name[:handler]>', 'default': 'utf-8',
           'validator': validate_encoding_and_error_handler}),
         ('Specify the error handler for unencodable characters in '
          'the output.  Acceptable values include "strict", "ignore", '
          '"replace", "xmlcharrefreplace", and '
          '"backslashreplace" (in Python 2.3+).  Default is "strict".  '
          'Usually specified as part of --output-encoding.',
          ['--output-encoding-error-handler'],
          {'default': 'strict', 'validator': validate_encoding_error_handler}),
         ('Specify the text encoding for error output.  Default is ASCII.  '
          'Optionally also specify the error handler for unencodable '
          'characters, after a colon (":"); default is "%s".  (See '
          '"--output-encoding-error-encoding".'
          % default_error_encoding_error_handler,
          ['--error-encoding', '-e'],
          {'metavar': '<name[:handler]>', 'default': 'ascii',
           'validator': validate_encoding_and_error_handler}),
         ('Specify the error handler for unencodable characters in '
          'error output.  See --output-encoding-error-handler for acceptable '
          'values.  Default is "%s".  Usually specified as part of '
          '--error-encoding.' % default_error_encoding_error_handler,
          ['--error-encoding-error-handler'],
          {'default': default_error_encoding_error_handler,
           'validator': validate_encoding_error_handler}),
         ('Specify the language of input text (ISO 639 2-letter identifier).'
          '  Default is "en" (English).',
          ['--language', '-l'], {'dest': 'language_code', 'default': 'en',
                                 'metavar': '<name>'}),
         ('Read configuration settings from <file>, if it exists.',
          ['--config'], {'metavar': '<file>', 'type': 'string',
                         'action': 'callback', 'callback': read_config_file}),
         ("Show this program's version number and exit.",
          ['--version', '-V'], {'action': 'version'}),
         ('Show this help message and exit.',
          ['--help', '-h'], {'action': 'help'}),
         # Hidden options, for development use only:
         (SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--expose-internal-attribute'],
          {'action': 'append', 'dest': 'expose_internals',
           'validator': validate_colon_separated_string_list}),))
    """Runtime settings and command-line options common to all Docutils front
    ends.  Setting specs specific to individual Docutils components are also
    used (see `populate_from_components()`)."""

    settings_defaults = {'_disable_config': None,
                         '_source': None,
                         '_destination': None}
    """Defaults for settings that don't have command-line option equivalents."""

    relative_path_settings = ('warning_stream',)

    config_section = 'general'

    version_template = '%%prog (Docutils %s)' % docutils.__version__
    """Default version message."""

    def __init__(self, components=(), defaults=None, read_config_files=None,
                 *args, **kwargs):
        """
        `components` is a list of Docutils components each containing a
        ``.settings_spec`` attribute.  `defaults` is a mapping of setting
        default overrides.
        """
        self.validators = {}
        """{setting: validation function} mapping, used by `validate_options`.
        Validation functions take three or five parameters: setting name,
        value, an `OptionParser` (``self``), and a `ConfigParser` and config
        file section if activated from a config file.  They return a (possibly
        modified) value, or raise an exception.  Populated from the "validator"
        keyword argument dictionary entries of components' ``settings_spec``
        attribute."""

        self.lists = {}
        """Set of list-type settings."""

        optparse.OptionParser.__init__(
            self, option_class=Option, add_help_option=None,
            formatter=optparse.TitledHelpFormatter(width=78),
            *args, **kwargs)
        if not self.version:
            self.version = self.version_template
        # Make an instance copy (it will be modified):
        self.relative_path_settings = list(self.relative_path_settings)
        self.components = (self,) + tuple(components)
        self.populate_from_components(self.components)
        self.set_defaults(**(defaults or {}))
        if read_config_files and not self.defaults['_disable_config']:
            try:
                config_settings = self.get_standard_config_settings()
            except ValueError, error:
                self.error(error)
            self.set_defaults(**config_settings.__dict__)

    def populate_from_components(self, components):
        """
        For each component, first populate from the `SettingsSpec.settings_spec`
        structure, then from the `SettingsSpec.settings_defaults` dictionary.
        After all components have been processed, check for and populate from
        each component's `SettingsSpec.settings_default_overrides` dictionary.
        """
        for component in components:
            if component is None:
                continue
            settings_spec = component.settings_spec
            self.relative_path_settings.extend(
                component.relative_path_settings)
            for i in range(0, len(settings_spec), 3):
                title, description, option_spec = settings_spec[i:i+3]
                if title:
                    group = optparse.OptionGroup(self, title, description)
                    self.add_option_group(group)
                else:
                    group = self        # single options
                for (help_text, option_strings, kwargs) in option_spec:
                    kwargs = kwargs.copy() # to be modified, locally only
                    if kwargs.has_key('validator'):
                        validator = kwargs['validator']
                        del kwargs['validator']
                    else:
                        validator = None
                    option = group.add_option(help=help_text, *option_strings,
                                              **kwargs)
                    if validator:
                        self.validators[option.dest] = validator
                    if kwargs.get('action') == 'append':
                        self.lists[option.dest] = 1
                if component.settings_defaults:
                    self.defaults.update(component.settings_defaults)
        for component in components:
            if component and component.settings_default_overrides:
                self.defaults.update(component.settings_default_overrides)

    def get_standard_config_files(self):
        """Return list of config files, from environment or standard."""
        try:
            config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
        except KeyError:
            config_files = self.standard_config_files
        return [os.path.expanduser(f) for f in config_files if f.strip()]

    def get_standard_config_settings(self):
        settings = Values()
        for filename in self.get_standard_config_files():
            settings.update(self.get_config_file_settings(filename), self)
        return settings

    def get_config_file_settings(self, config_file):
        """Returns a dictionary containing appropriate config file settings."""
        parser = ConfigParser()
        parser.read(config_file, self)
        base_path = os.path.dirname(config_file)
        applied = {}
        settings = Values()
        for component in self.components:
            if not component:
                continue
            for section in (tuple(component.config_section_dependencies or ())
                            + (component.config_section,)):
                if applied.has_key(section):
                    continue
                applied[section] = 1
                settings.update(parser.get_section(section), self)
        make_paths_absolute(
            settings.__dict__, self.relative_path_settings, base_path)
        return settings.__dict__

    def check_values(self, values, args):
        """Store positional arguments as runtime settings."""
        values._source, values._destination = self.check_args(args)
        make_paths_absolute(values.__dict__, self.relative_path_settings,
                            os.getcwd())
        return values

    def check_args(self, args):
        source = destination = None
        if args:
            source = args.pop(0)
            if source == '-':           # means stdin
                source = None
        if args:
            destination = args.pop(0)
            if destination == '-':      # means stdout
                destination = None
        if args:
            self.error('Maximum 2 arguments allowed.')
        if source and source == destination:
            self.error('Do not specify the same file for both source and '
                       'destination.  It will clobber the source file.')
        return source, destination

    def get_default_values(self):
        """Needed to get custom `Values` instances."""
        return Values(self.defaults)


class ConfigParser(CP.ConfigParser):

    old_settings = {
        'pep_stylesheet': ('pep_html writer', 'stylesheet'),
        'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
        'pep_template': ('pep_html writer', 'template')}
    """{old setting: (new section, new setting)} mapping, used by
    `handle_old_config`, to convert settings from the old [options] section."""

    old_warning = """
The "[option]" section is deprecated.  Support for old-format configuration
files may be removed in a future Docutils release.  Please revise your
configuration files.  See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""

    def read(self, filenames, option_parser):
        if type(filenames) in (types.StringType, types.UnicodeType):
            filenames = [filenames]
        for filename in filenames:
            CP.ConfigParser.read(self, filename)
            if self.has_section('options'):
                self.handle_old_config(filename)
            self.validate_settings(filename, option_parser)

    def handle_old_config(self, filename):
        warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
                               filename, 0)
        options = self.get_section('options')
        if not self.has_section('general'):
            self.add_section('general')
        for key, value in options.items():
            if self.old_settings.has_key(key):
                section, setting = self.old_settings[key]
                if not self.has_section(section):
                    self.add_section(section)
            else:
                section = 'general'
                setting = key
            if not self.has_option(section, setting):
                self.set(section, setting, value)
        self.remove_section('options')

    def validate_settings(self, filename, option_parser):
        """Call the validator function on all applicable settings."""
        for section in self.sections():
            for setting in self.options(section):
                validator = option_parser.validators.get(setting)
                if validator:
                    value = self.get(section, setting, raw=1)
                    try:
                        new_value = validator(
                            setting, value, option_parser,
                            config_parser=self, config_section=section)
                    except Exception, error:
                        raise ValueError(
                            'Error in config file "%s", section "[%s]":\n'
                            '    %s: %s\n        %s = %s'
                            % (filename, section, error.__class__.__name__,
                               error, setting, value)), None, sys.exc_info()[2]
                    self.set(section, setting, new_value)

    def optionxform(self, optionstr):
        """
        Transform '-' to '_' so the cmdline form of option names can be used.
        """
        return optionstr.lower().replace('-', '_')

    def get_section(self, section):
        """
        Return a given section as a dictionary (empty if the section
        doesn't exist).
        """
        section_dict = {}
        if self.has_section(section):
            for option in self.options(section):
                section_dict[option] = self.get(section, option, raw=1)
        return section_dict


class ConfigDeprecationWarning(DeprecationWarning):
    """Warning for deprecated configuration file features."""


=== Added File Zope/lib/python/third_party/docutils/docutils/io.py ===
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
I/O classes provide a uniform API for low-level input and output.  Subclasses
will exist for a variety of input/output mechanisms.
"""

__docformat__ = 'reStructuredText'

import sys
try:
    import locale
except:
    pass
from types import UnicodeType
from docutils import TransformSpec


class Input(TransformSpec):

    """
    Abstract base class for input wrappers.
    """

    component_type = 'input'

    default_source_path = None

    def __init__(self, source=None, source_path=None, encoding=None):
        self.encoding = encoding
        """Text encoding for the input source."""

        self.source = source
        """The source of input data."""

        self.source_path = source_path
        """A text reference to the source."""

        if not source_path:
            self.source_path = self.default_source_path

    def __repr__(self):
        return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
                                                  self.source_path)

    def read(self):
        raise NotImplementedError

    def decode(self, data):
        """
        Decode a string, `data`, heuristically.
        Raise UnicodeError if unsuccessful.

        The client application should call ``locale.setlocale`` at the
        beginning of processing::

            locale.setlocale(locale.LC_ALL, '')
        """
        if (self.encoding and self.encoding.lower() == 'unicode'
            or isinstance(data, UnicodeType)):
            return unicode(data)
        encodings = [self.encoding, 'utf-8']
        try:
            encodings.append(locale.nl_langinfo(locale.CODESET))
        except:
            pass
        try:
            encodings.append(locale.getlocale()[1])
        except:
            pass
        try:
            encodings.append(locale.getdefaultlocale()[1])
        except:
            pass
        encodings.append('latin-1')
        for enc in encodings:
            if not enc:
                continue
            try:
                return unicode(data, enc)
            except (UnicodeError, LookupError):
                pass
        raise UnicodeError(
            'Unable to decode input data.  Tried the following encodings: %s.'
            % ', '.join([repr(enc) for enc in encodings if enc]))
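
    # Hypothetical example of the fallback chain above (byte strings made
    # up): with `self.encoding` left as None, decode('caf\xc3\xa9') tries
    # 'utf-8' first and returns u'caf\xe9'; a byte string that is not valid
    # UTF-8, such as '\xe9', falls through to the locale-derived encodings
    # and finally to 'latin-1', which accepts any 8-bit data.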


class Output(TransformSpec):

    """
    Abstract base class for output wrappers.
    """

    component_type = 'output'

    default_destination_path = None

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the output destination."""

        self.error_handler = error_handler or 'strict'
        """Text encoding error handler."""

        self.destination = destination
        """The destination for output data."""

        self.destination_path = destination_path
        """A text reference to the destination."""

        if not destination_path:
            self.destination_path = self.default_destination_path

    def __repr__(self):
        return ('%s: destination=%r, destination_path=%r'
                % (self.__class__, self.destination, self.destination_path))

    def write(self, data):
        """`data` is a Unicode string, to be encoded by `self.encode`."""
        raise NotImplementedError

    def encode(self, data):
        if self.encoding and self.encoding.lower() == 'unicode':
            return data
        else:
            try:
                return data.encode(self.encoding, self.error_handler)
            except ValueError:
                # ValueError is raised if there are unencodable chars
                # in data and the error_handler isn't found.
                if self.error_handler == 'xmlcharrefreplace':
                    # We are using xmlcharrefreplace with a Python
                    # version that doesn't support it (2.1 or 2.2), so
                    # we emulate its behavior.
                    return ''.join([self.xmlcharref_encode(char) for char in data])
                else:
                    raise

    def xmlcharref_encode(self, char):
        """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
        try:
            return char.encode(self.encoding, 'strict')
        except UnicodeError:
            return '&#%i;' % ord(char)
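
    # Hypothetical example (values made up): with self.encoding = 'ascii'
    # and self.error_handler = 'xmlcharrefreplace', encode(u'caf\xe9')
    # yields 'caf&#233;' -- directly via the codec on Python 2.3+, or
    # through the per-character xmlcharref_encode() fallback above on
    # Python 2.1/2.2, where the unsupported error handler makes the encode
    # call raise ValueError.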


class FileInput(Input):

    """
    Input for single, simple file-like objects.
    """

    def __init__(self, source=None, source_path=None,
                 encoding=None, autoclose=1, handle_io_errors=1):
        """
        :Parameters:
            - `source`: either a file-like object (which is read directly), or
              `None` (which implies `sys.stdin` if no `source_path` given).
            - `source_path`: a path to a file, which is opened and then read.
            - `autoclose`: close automatically after read (boolean); always
              false if `sys.stdin` is the source.
            - `handle_io_errors`: if true, errors opening `source_path` are
              reported to stderr and the process exits; if false, the
              exception is propagated.
        """
        Input.__init__(self, source, source_path, encoding)
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        if source is None:
            if source_path:
                try:
                    self.source = open(source_path)
                except IOError, error:
                    if not handle_io_errors:
                        raise
                    print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
                                                    error)
                    print >>sys.stderr, (
                        'Unable to open source file for reading (%r).  Exiting.'
                        % source_path)
                    sys.exit(1)
            else:
                self.source = sys.stdin
                self.autoclose = None
        if not source_path:
            try:
                self.source_path = self.source.name
            except AttributeError:
                pass

    def read(self):
        """
        Read and decode a single file and return the data (Unicode string).
        """
        data = self.source.read()
        if self.autoclose:
            self.close()
        return self.decode(data)

    def close(self):
        self.source.close()


class FileOutput(Output):

    """
    Output for single, simple file-like objects.
    """

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict', autoclose=1,
                 handle_io_errors=1):
        """
        :Parameters:
            - `destination`: either a file-like object (which is written
              directly) or `None` (which implies `sys.stdout` if no
              `destination_path` given).
            - `destination_path`: a path to a file, which is opened and then
              written.
            - `autoclose`: close automatically after write (boolean); always
              false if `sys.stdout` is the destination.
            - `handle_io_errors`: if true, errors opening `destination_path`
              are reported to stderr and the process exits; if false, the
              exception is propagated.
        """
        Output.__init__(self, destination, destination_path,
                        encoding, error_handler)
        self.opened = 1
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        if destination is None:
            if destination_path:
                self.opened = None
            else:
                self.destination = sys.stdout
                self.autoclose = None
        if not destination_path:
            try:
                self.destination_path = self.destination.name
            except AttributeError:
                pass

    def open(self):
        try:
            self.destination = open(self.destination_path, 'w')
        except IOError, error:
            if not self.handle_io_errors:
                raise
            print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
                                            error)
            print >>sys.stderr, ('Unable to open destination file for writing '
                                 '(%r).  Exiting.' % self.destination_path)
            sys.exit(1)
        self.opened = 1

    def write(self, data):
        """Encode `data`, write it to a single file, and return it."""
        output = self.encode(data)
        if not self.opened:
            self.open()
        self.destination.write(output)
        if self.autoclose:
            self.close()
        return output

    def close(self):
        self.destination.close()
        self.opened = None


class StringInput(Input):

    """
    Direct string input.
    """

    default_source_path = '<string>'

    def read(self):
        """Decode and return the source string."""
        return self.decode(self.source)


class StringOutput(Output):

    """
    Direct string output.
    """

    default_destination_path = '<string>'

    def write(self, data):
        """Encode `data`, store it in `self.destination`, and return it."""
        self.destination = self.encode(data)
        return self.destination


class NullInput(Input):

    """
    Degenerate input: read nothing.
    """

    default_source_path = 'null input'

    def read(self):
        """Return a null string."""
        return u''


class NullOutput(Output):

    """
    Degenerate output: write nothing.
    """

    default_destination_path = 'null output'

    def write(self, data):
        """Do nothing ([don't even] send data to the bit bucket)."""
        pass
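
# Hypothetical usage sketch tying the classes above together (the file name
# is illustrative only)::
#
#     from docutils import io
#     source = io.FileInput(source_path='README.txt', encoding='utf-8')
#     text = source.read()        # Unicode string; file is auto-closed
#     dest = io.StringOutput(encoding='utf-8')
#     encoded = dest.write(text)  # encoded string, also kept in
#                                 # dest.destination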


=== Added File Zope/lib/python/third_party/docutils/docutils/nodes.py ===
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
Docutils document tree element class library.

Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes.  Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.

The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.).  Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.

.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""

__docformat__ = 'reStructuredText'

import sys
import os
import re
import xml.dom.minidom
from types import IntType, SliceType, StringType, UnicodeType, \
     TupleType, ListType
from UserString import UserString


# ==============================
#  Functional Node Base Classes
# ==============================

class Node:

    """Abstract base class of nodes in a document tree."""

    parent = None
    """Back-reference to the Node immediately containing this Node."""

    document = None
    """The `document` node at the root of the tree containing this Node."""

    source = None
    """Path or description of the input source which generated this Node."""

    line = None
    """The line number (1-based) of the beginning of this Node in `source`."""

    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty.  A node is more
        than a simple container.  Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.

        Use `len()` to check node length.  Use `None` to represent a boolean
        false value.
        """
        return 1

    def asdom(self, dom=xml.dom.minidom):
        """Return a DOM **fragment** representation of this Node."""
        domroot = dom.Document()
        return self._dom_node(domroot)

    def pformat(self, indent='    ', level=0):
        """Return an indented pseudo-XML representation, for test purposes."""
        raise NotImplementedError

    def copy(self):
        """Return a copy of self."""
        raise NotImplementedError

    def setup_child(self, child):
        child.parent = self
        if self.document:
            child.document = self.document
            if child.source is None:
                child.source = self.document.current_source
            if child.line is None:
                child.line = self.document.current_line

    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling ``visit_...`` methods of
        `visitor` when entering each node. If there is no
        ``visit_particular_node`` method for a node of type
        ``particular_node``, the ``unknown_visit`` method is called.  (The
        `walkabout()` method is similar, except it also calls ``depart_...``
        methods before exiting each node.)

        This tree traversal supports limited in-place tree
        modifications.  Replacing one node with one or more nodes is
        OK, as is removing an element.  However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.

        Within ``visit_...`` methods (and ``depart_...`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit_...`` method for each `Node` subclass encountered.
        """
        name = 'visit_' + self.__class__.__name__
        method = getattr(visitor, name, visitor.unknown_visit)
        visitor.document.reporter.debug(name, category='nodes.Node.walk')
        try:
            method(self)
        except (SkipChildren, SkipNode):
            return
        except SkipDeparture:           # not applicable; ignore
            pass
        children = self.get_children()
        try:
            for child in children[:]:
                child.walk(visitor)
        except SkipSiblings:
            pass

    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which see),
        except also call ``depart_...`` methods before exiting each node. If
        there is no ``depart_particular_node`` method for a node of type
        ``particular_node``, the ``unknown_departure`` method is called.

        Parameter `visitor`: A `NodeVisitor` object, containing ``visit_...``
        and ``depart_...`` methods for each `Node` subclass encountered.
        """
        call_depart = 1
        name = 'visit_' + self.__class__.__name__
        method = getattr(visitor, name, visitor.unknown_visit)
        visitor.document.reporter.debug(name, category='nodes.Node.walkabout')
        try:
            try:
                method(self)
            except SkipNode:
                return
            except SkipDeparture:
                call_depart = 0
            children = self.get_children()
            try:
                for child in children[:]:
                    child.walkabout(visitor)
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        if call_depart:
            name = 'depart_' + self.__class__.__name__
            method = getattr(visitor, name, visitor.unknown_departure)
            visitor.document.reporter.debug(
                  name, category='nodes.Node.walkabout')
            method(self)


class Text(Node, UserString):

    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes.  Initialize by passing a string to the constructor.
    Access the text itself with the `astext` method.
    """

    tagname = '#text'

    def __init__(self, data, rawsource=''):
        UserString.__init__(self, data)

        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

    def __repr__(self):
        data = repr(self.data)
        if len(data) > 70:
            data = repr(self.data[:64] + ' ...')
        return '<%s: %s>' % (self.tagname, data)

    def __len__(self):
        return len(self.data)

    def shortrepr(self):
        data = repr(self.data)
        if len(data) > 20:
            data = repr(self.data[:16] + ' ...')
        return '<%s: %s>' % (self.tagname, data)

    def _dom_node(self, domroot):
        return domroot.createTextNode(self.data)

    def astext(self):
        return self.data

    def copy(self):
        return self.__class__(self.data)

    def pformat(self, indent='    ', level=0):
        result = []
        indent = indent * level
        for line in self.data.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)

    def get_children(self):
        """Text nodes have no children. Return []."""
        return []


class Element(Node):

    """
    `Element` is the superclass to all specific elements.

    Elements contain attributes and child nodes.  Elements emulate
    dictionaries for attributes, indexing by attribute name (a string).  To
    set the attribute 'att' to 'value', do::

        element['att'] = 'value'

    Elements also emulate lists for child nodes (element nodes and/or text
    nodes), indexing by integer.  To get the first child node, use::

        element[0]

    Elements may be constructed using the ``+=`` operator.  To add one new
    child node to element, do::

        element += node

    This is equivalent to ``element.append(node)``.

    To add a list of multiple child nodes at once, use the same ``+=``
    operator::

        element += [node1, node2]

    This is equivalent to ``element.extend([node1, node2])``.
    """

    tagname = None
    """The element generic identifier. If None, it is set as an instance
    attribute to the name of the class."""

    child_text_separator = '\n\n'
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', *children, **attributes):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

        self.children = []
        """List of child nodes (elements and/or `Text`)."""

        self.extend(children)           # maintain parent info

        self.attributes = {}
        """Dictionary of attribute {name: value}."""

        for att, value in attributes.items():
            self.attributes[att.lower()] = value

        if self.tagname is None:
            self.tagname = self.__class__.__name__

    def _dom_node(self, domroot):
        element = domroot.createElement(self.tagname)
        for attribute, value in self.attributes.items():
            if isinstance(value, ListType):
                value = ' '.join(['%s' % v for v in value])
            element.setAttribute(attribute, '%s' % value)
        for child in self.children:
            element.appendChild(child._dom_node(domroot))
        return element

    def __repr__(self):
        data = ''
        for c in self.children:
            data += c.shortrepr()
            if len(data) > 60:
                data = data[:56] + ' ...'
                break
        if self.hasattr('name'):
            return '<%s "%s": %s>' % (self.__class__.__name__,
                                      self.attributes['name'], data)
        else:
            return '<%s: %s>' % (self.__class__.__name__, data)

    def shortrepr(self):
        if self.hasattr('name'):
            return '<%s "%s"...>' % (self.__class__.__name__,
                                      self.attributes['name'])
        else:
            return '<%s...>' % self.tagname

    def __str__(self):
        return self.__unicode__().encode('raw_unicode_escape')

    def __unicode__(self):
        if self.children:
            return u'%s%s%s' % (self.starttag(),
                                 ''.join([str(c) for c in self.children]),
                                 self.endtag())
        else:
            return self.emptytag()

    def starttag(self):
        parts = [self.tagname]
        for name, value in self.attlist():
            if value is None:           # boolean attribute
                parts.append(name)
            elif isinstance(value, ListType):
                values = ['%s' % v for v in value]
                parts.append('%s="%s"' % (name, ' '.join(values)))
            else:
                parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)

    def endtag(self):
        return '</%s>' % self.tagname

    def emptytag(self):
        return u'<%s/>' % ' '.join([self.tagname] +
                                    ['%s="%s"' % (n, v)
                                     for n, v in self.attlist()])

    def __len__(self):
        return len(self.children)

    def __getitem__(self, key):
        if isinstance(key, UnicodeType) or isinstance(key, StringType):
            return self.attributes[key]
        elif isinstance(key, IntType):
            return self.children[key]
        elif isinstance(key, SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            return self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __setitem__(self, key, item):
        if isinstance(key, UnicodeType) or isinstance(key, StringType):
            self.attributes[str(key)] = item
        elif isinstance(key, IntType):
            self.setup_child(item)
            self.children[key] = item
        elif isinstance(key, SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            for node in item:
                self.setup_child(node)
            self.children[key.start:key.stop] = item
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __delitem__(self, key):
        if isinstance(key, UnicodeType) or isinstance(key, StringType):
            del self.attributes[key]
        elif isinstance(key, IntType):
            del self.children[key]
        elif isinstance(key, SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            del self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a simple '
                              'slice, or an attribute name string')

    def __add__(self, other):
        return self.children + other

    def __radd__(self, other):
        return other + self.children

    def __iadd__(self, other):
        """Append a node or a list of nodes to `self.children`."""
        if isinstance(other, Node):
            self.setup_child(other)
            self.children.append(other)
        elif other is not None:
            for node in other:
                self.setup_child(node)
            self.children.extend(other)
        return self

    def astext(self):
        return self.child_text_separator.join(
              [child.astext() for child in self.children])

    def attlist(self):
        attlist = self.attributes.items()
        attlist.sort()
        return attlist

    def get(self, key, failobj=None):
        return self.attributes.get(key, failobj)

    def hasattr(self, attr):
        return self.attributes.has_key(attr)

    def delattr(self, attr):
        if self.attributes.has_key(attr):
            del self.attributes[attr]

    def setdefault(self, key, failobj=None):
        return self.attributes.setdefault(key, failobj)

    has_key = hasattr

    def append(self, item):
        self.setup_child(item)
        self.children.append(item)

    def extend(self, item):
        for node in item:
            self.setup_child(node)
        self.children.extend(item)

    def insert(self, index, item):
        if isinstance(item, Node):
            self.setup_child(item)
            self.children.insert(index, item)
        elif item is not None:
            self[index:index] = item

    def pop(self, i=-1):
        return self.children.pop(i)

    def remove(self, item):
        self.children.remove(item)

    def index(self, item):
        return self.children.index(item)

    def replace(self, old, new):
        """Replace one child `Node` with another child or children."""
        index = self.index(old)
        if isinstance(new, Node):
            self.setup_child(new)
            self[index] = new
        elif new is not None:
            self[index:index+1] = new

    def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
        """
        Return the index of the first child whose class exactly matches.

        Parameters:

        - `childclass`: A `Node` subclass to search for, or a tuple of `Node`
          classes. If a tuple, any of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, TupleType):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self[index], c):
                    return index
        return None

    def first_child_not_matching_class(self, childclass, start=0,
                                       end=sys.maxint):
        """
        Return the index of the first child whose class does *not* match.

        Parameters:

        - `childclass`: A `Node` subclass to skip, or a tuple of `Node`
          classes. If a tuple, none of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, TupleType):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            match = 0
            for c in childclass:
                if isinstance(self.children[index], c):
                    match = 1
                    break
            if not match:
                return index
        return None
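
    # Hypothetical example (`section`, `title`, and `paragraph` are element
    # classes defined later in this module): for a `section` node whose
    # children are [title, paragraph, paragraph],
    # first_child_matching_class(paragraph) returns 1, and
    # first_child_not_matching_class(title) also returns 1; both return
    # None when no child qualifies.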

    def pformat(self, indent='    ', level=0):
        return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
                       [child.pformat(indent, level+1)
                        for child in self.children])

    def get_children(self):
        """Return this element's children."""
        return self.children

    def copy(self):
        return self.__class__(**self.attributes)

    def set_class(self, name):
        """Add a new name to the "class" attribute."""
        self.attributes['class'] = (self.attributes.get('class', '') + ' '
                                    + name.lower()).strip()


class TextElement(Element):

    """
    An element which directly contains text.

    Its children are all `Text` or `TextElement` subclass nodes.  You can
    check whether an element's context is inline simply by checking whether
    its immediate parent is a `TextElement` instance (including subclasses).
    This is handy for nodes like `image` that can appear both inline and as
    standalone body elements.

    If passing children to `__init__()`, make sure to set `text` to
    ``''`` or some other suitable value.
    """

    child_text_separator = ''
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        if text != '':
            textnode = Text(text)
            Element.__init__(self, rawsource, textnode, *children,
                              **attributes)
        else:
            Element.__init__(self, rawsource, *children, **attributes)


class FixedTextElement(TextElement):

    """An element which directly contains preformatted text."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        self.attributes['xml:space'] = 'preserve'


# ========
#  Mixins
# ========

class Resolvable:

    resolved = 0


class BackLinkable:

    def add_backref(self, refid):
        self.setdefault('backrefs', []).append(refid)


# ====================
#  Element Categories
# ====================

class Root: pass

class Titular: pass

class PreDecorative:
    """Category of Node which may occur before Decorative Nodes."""

class PreBibliographic(PreDecorative):
    """Category of Node which may occur before Bibliographic Nodes."""

class Bibliographic(PreDecorative): pass

class Decorative: pass

class Structural: pass

class Body: pass

class General(Body): pass

class Sequential(Body): pass

class Admonition(Body): pass

class Special(Body):
    """Special internal body elements."""

class Invisible:
    """Internal elements that don't appear in output."""

class Part: pass

class Inline: pass

class Referential(Resolvable): pass

class Targetable(Resolvable):

    referenced = 0

    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target"""

class Labeled:
    """Contains a `label` as its first element."""


# ==============
#  Root Element
# ==============

class document(Root, Structural, Element):

    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)

        self.current_source = None
        """Path to or description of the input source being processed."""

        self.current_line = None
        """Line number (1-based) of `current_source`."""

        self.settings = settings
        """Runtime settings data record."""

        self.reporter = reporter
        """System message generator."""

        self.external_targets = []
        """List of external target nodes."""

        self.internal_targets = []
        """List of internal target nodes."""

        self.indirect_targets = []
        """List of indirect target nodes."""

        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""

        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""

        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""

        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""

        self.nameids = {}
        """Mapping of names to unique id's."""

        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit."""

        self.ids = {}
        """Mapping of ids to nodes."""

        self.substitution_refs = {}
        """Mapping of substitution names to lists of substitution_reference
        nodes."""

        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""

        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""

        self.anonymous_targets = []
        """List of anonymous target nodes."""

        self.anonymous_refs = []
        """List of anonymous reference nodes."""

        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""

        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""

        self.symbol_footnotes = []
        """List of symbol footnote nodes."""

        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""

        self.footnotes = []
        """List of manually-numbered footnote nodes."""

        self.citations = []
        """List of citation nodes."""

        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""

        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""

        self.id_start = 1
        """Initial ID number."""

        self.parse_messages = []
        """System messages generated while parsing."""

        self.transform_messages = []
        """System messages generated while applying transforms."""

        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""

        self.document = self

    def asdom(self, dom=xml.dom.minidom):
        """Return a DOM representation of this document."""
        domroot = dom.Document()
        domroot.appendChild(self._dom_node(domroot))
        return domroot

    def set_id(self, node, msgnode=None):
        if node.has_key('id'):
            id = node['id']
            if self.ids.has_key(id) and self.ids[id] is not node:
                msg = self.reporter.severe('Duplicate ID: "%s".' % id)
                if msgnode != None:
                    msgnode += msg
        else:
            if node.has_key('name'):
                id = make_id(node['name'])
            else:
                id = ''
            while not id or self.ids.has_key(id):
                id = 'id%s' % self.id_start
                self.id_start += 1
            node['id'] = id
        self.ids[id] = node
        return id

    def set_name_id_map(self, node, id, msgnode=None, explicit=None):
        """
        `self.nameids` maps names to IDs, while `self.nametypes` maps names to
        booleans representing hyperlink type (True==explicit,
        False==implicit).  This method updates the mappings.

        The following state transition table shows how `self.nameids` ("ids")
        and `self.nametypes` ("types") change with new input (a call to this
        method), and what actions are performed:

        ====  =====  ========  ========  =======  ====  =====  =====
         Old State    Input          Action        New State   Notes
        -----------  --------  -----------------  -----------  -----
        ids   types  new type  sys.msg.  dupname  ids   types
        ====  =====  ========  ========  =======  ====  =====  =====
        --    --     explicit  --        --       new   True
        --    --     implicit  --        --       new   False
        None  False  explicit  --        --       new   True
        old   False  explicit  implicit  old      new   True
        None  True   explicit  explicit  new      None  True
        old   True   explicit  explicit  new,old  None  True   [#]_
        None  False  implicit  implicit  new      None  False
        old   False  implicit  implicit  new,old  None  False
        None  True   implicit  implicit  new      None  True
        old   True   implicit  implicit  new      old   True
        ====  =====  ========  ========  =======  ====  =====  =====

        .. [#] Do not clear the name-to-id map or invalidate the old target if
           both old and new targets are external and refer to identical URIs.
           The new target is invalidated regardless.
        """
        if node.has_key('name'):
            name = node['name']
            if self.nameids.has_key(name):
                self.set_duplicate_name_id(node, id, name, msgnode, explicit)
            else:
                self.nameids[name] = id
                self.nametypes[name] = explicit

    def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
        old_id = self.nameids[name]
        old_explicit = self.nametypes[name]
        self.nametypes[name] = old_explicit or explicit
        if explicit:
            if old_explicit:
                level = 2
                if old_id is not None:
                    old_node = self.ids[old_id]
                    if node.has_key('refuri'):
                        refuri = node['refuri']
                        if old_node.has_key('name') \
                               and old_node.has_key('refuri') \
                               and old_node['refuri'] == refuri:
                            level = 1   # just inform if refuri's identical
                    if level > 1:
                        dupname(old_node)
                        self.nameids[name] = None
                msg = self.reporter.system_message(
                    level, 'Duplicate explicit target name: "%s".' % name,
                    backrefs=[id], base_node=node)
                if msgnode != None:
                    msgnode += msg
                dupname(node)
            else:
                self.nameids[name] = id
                if old_id is not None:
                    old_node = self.ids[old_id]
                    dupname(old_node)
        else:
            if old_id is not None and not old_explicit:
                self.nameids[name] = None
                old_node = self.ids[old_id]
                dupname(old_node)
            dupname(node)
        if not explicit or (not old_explicit and old_id is not None):
            msg = self.reporter.info(
                'Duplicate implicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            if msgnode != None:
                msgnode += msg

    def has_name(self, name):
        return self.nameids.has_key(name)

    # "note" here is an imperative verb: "take note of".
    def note_implicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=None)

    def note_explicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=1)

    def note_refname(self, node):
        self.refnames.setdefault(node['refname'], []).append(node)

    def note_refid(self, node):
        self.refids.setdefault(node['refid'], []).append(node)

    def note_external_target(self, target):
        self.external_targets.append(target)

    def note_internal_target(self, target):
        self.internal_targets.append(target)

    def note_indirect_target(self, target):
        self.indirect_targets.append(target)
        if target.has_key('name'):
            self.note_refname(target)

    def note_anonymous_target(self, target):
        self.set_id(target)
        self.anonymous_targets.append(target)

    def note_anonymous_ref(self, ref):
        self.anonymous_refs.append(ref)

    def note_autofootnote(self, footnote):
        self.set_id(footnote)
        self.autofootnotes.append(footnote)

    def note_autofootnote_ref(self, ref):
        self.set_id(ref)
        self.autofootnote_refs.append(ref)

    def note_symbol_footnote(self, footnote):
        self.set_id(footnote)
        self.symbol_footnotes.append(footnote)

    def note_symbol_footnote_ref(self, ref):
        self.set_id(ref)
        self.symbol_footnote_refs.append(ref)

    def note_footnote(self, footnote):
        self.set_id(footnote)
        self.footnotes.append(footnote)

    def note_footnote_ref(self, ref):
        self.set_id(ref)
        self.footnote_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_citation(self, citation):
        self.citations.append(citation)

    def note_citation_ref(self, ref):
        self.set_id(ref)
        self.citation_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_substitution_def(self, subdef, def_name, msgnode=None):
        name = subdef['name'] = whitespace_normalize_name(def_name)
        if self.substitution_defs.has_key(name):
            msg = self.reporter.error(
                  'Duplicate substitution definition name: "%s".' % name,
                  base_node=subdef)
            if msgnode != None:
                msgnode += msg
            oldnode = self.substitution_defs[name]
            dupname(oldnode)
        # keep only the last definition:
        self.substitution_defs[name] = subdef
        # case-insensitive mapping:
        self.substitution_names[fully_normalize_name(name)] = name

    def note_substitution_ref(self, subref, refname):
        name = subref['refname'] = whitespace_normalize_name(refname)
        self.substitution_refs.setdefault(name, []).append(subref)

    def note_pending(self, pending, priority=None):
        self.transformer.add_pending(pending, priority)

    def note_parse_message(self, message):
        self.parse_messages.append(message)

    def note_transform_message(self, message):
        self.transform_messages.append(message)

    def note_source(self, source, offset):
        self.current_source = source
        if offset is None:
            self.current_line = offset
        else:
            self.current_line = offset + 1

    def copy(self):
        return self.__class__(self.settings, self.reporter,
                              **self.attributes)


# ================
#  Title Elements
# ================

class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass


# ========================
#  Bibliographic Elements
# ========================

class docinfo(Bibliographic, Element): pass
class info(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass


# =====================
#  Decorative Elements
# =====================

class decoration(Decorative, Element): pass
class header(Decorative, Element): pass
class footer(Decorative, Element): pass


# =====================
#  Structural Elements
# =====================

class section(Structural, Element): pass


class topic(Structural, Element):

    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures.  A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.

    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document.  Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """


class sidebar(Structural, Element):

    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material.  A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it.  Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.

    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document.  Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """


class transition(Structural, Element): pass


# ===============
#  Body Elements
# ===============

class paragraph(General, TextElement): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass


class option(Part, Element):

    child_text_separator = ''


class option_argument(Part, TextElement):

    def astext(self):
        return self.get('delimiter', ' ') + TextElement.astext(self)


class option_group(Part, Element):

    child_text_separator = ', '


class option_list(Sequential, Element): pass


class option_list_item(Part, Element):

    child_text_separator = '  '


class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, FixedTextElement): pass
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, PreBibliographic, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, Element, Labeled, BackLinkable): pass
class citation(General, Element, Labeled, BackLinkable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass


class system_message(Special, PreBibliographic, Element, BackLinkable):

    def __init__(self, message=None, *children, **attributes):
        if message:
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            print 'system_message: children=%r' % (children,)
            raise

    def astext(self):
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))


class pending(Special, Invisible, PreBibliographic, Element):

    """
    The "pending" element is used to encapsulate a pending operation: the
    operation (transform), the point at which to apply it, and any data it
    requires.  Only the pending operation's location within the document is
    stored in the public document tree (by the "pending" object itself); the
    operation and its data are stored in the "pending" object's internal
    instance attributes.

    For example, say you want a table of contents in your reStructuredText
    document.  The easiest way to specify where to put it is from within the
    document, with a directive::

        .. contents::

    But the "contents" directive can't do its work until the entire document
    has been parsed and possibly transformed to some extent.  So the directive
    code leaves a placeholder behind that will trigger the second phase of its
    processing, something like this::

        <pending ...public attributes...> + internal attributes

    Use `document.note_pending()` so that the
    `docutils.transforms.Transformer` stage of processing can run all pending
    transforms.
    """

    def __init__(self, transform, details=None,
                 rawsource='', *children, **attributes):
        Element.__init__(self, rawsource, *children, **attributes)

        self.transform = transform
        """The `docutils.transforms.Transform` class implementing the pending
        operation."""

        self.details = details or {}
        """Detail data (dictionary) required by the pending operation."""

    def pformat(self, indent='    ', level=0):
        internals = [
              '.. internal attributes:',
              '     .transform: %s.%s' % (self.transform.__module__,
                                          self.transform.__name__),
              '     .details:']
        details = self.details.items()
        details.sort()
        for key, value in details:
            if isinstance(value, Node):
                internals.append('%7s%s:' % ('', key))
                internals.extend(['%9s%s' % ('', line)
                                  for line in value.pformat().splitlines()])
            elif value and isinstance(value, ListType) \
                  and isinstance(value[0], Node):
                internals.append('%7s%s:' % ('', key))
                for v in value:
                    internals.extend(['%9s%s' % ('', line)
                                      for line in v.pformat().splitlines()])
            else:
                internals.append('%7s%s: %r' % ('', key, value))
        return (Element.pformat(self, indent, level)
                + ''.join([('    %s%s\n' % (indent * level, line))
                           for line in internals]))

    def copy(self):
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)


class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    pass


# =================
#  Inline Elements
# =================

class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass


class image(General, Inline, TextElement):

    def astext(self):
        return self.get('alt', '')


class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass


# ========================================
#  Auxiliary Classes, Functions, and Data
# ========================================

node_class_names = """
    Text
    abbreviation acronym address admonition attention attribution author
        authors
    block_quote bullet_list
    caption caution citation citation_reference classifier colspec comment
        contact copyright
    danger date decoration definition definition_list definition_list_item
        description docinfo doctest_block document
    emphasis entry enumerated_list error
    field field_body field_list field_name figure footer
        footnote footnote_reference
    generated
    header hint
    image important info inline
    label legend line_block list_item literal literal_block
    note
    option option_argument option_group option_list option_list_item
        option_string organization
    paragraph pending problematic
    raw reference revision row rubric
    section sidebar status strong subscript substitution_definition
        substitution_reference subtitle superscript system_message
    table target tbody term tgroup thead tip title title_reference topic
        transition
    version
    warning""".split()
"""A list of names of all concrete Node subclasses."""


class NodeVisitor:

    """
    "Visitor" pattern [GoF95]_ abstract superclass implementation for document
    tree traversals.

    Each node class has corresponding methods, doing nothing by default;
    override individual methods for specific and useful behaviour.  The
    "``visit_`` + node class name" method is called by `Node.walk()` upon
    entering a node.  `Node.walkabout()` also calls the "``depart_`` + node
    class name" method before exiting a node.

    This is a base class for visitors whose ``visit_...`` & ``depart_...``
    methods should be implemented for *all* node types encountered (such as
    for `docutils.writers.Writer` subclasses).  Unimplemented methods will
    raise exceptions.

    For sparse traversals, where only certain node types are of interest,
    subclass `SparseNodeVisitor` instead.  When (mostly or entirely) uniform
    processing is desired, subclass `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    def __init__(self, document):
        self.document = document

    def unknown_visit(self, node):
        """
        Called when entering unknown `Node` types.

        Raise an exception unless overridden.
        """
        raise NotImplementedError('%s visiting unknown node type: %s'
                                  % (self.__class__, node.__class__.__name__))

    def unknown_departure(self, node):
        """
        Called before exiting unknown `Node` types.

        Raise exception unless overridden.
        """
        raise NotImplementedError('%s departing unknown node type: %s'
                                  % (self.__class__, node.__class__.__name__))


class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest.  When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """

class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only. Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

def _call_default_visit(self, node):
    self.default_visit(node)

def _call_default_departure(self, node):
    self.default_departure(node)

def _nop(self, node):
    pass

def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

_add_node_class_names(node_class_names)
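
# A minimal sketch (kept as a comment; `ReferenceCounter` is invented here
# and is not part of docutils) of how the dynamically assigned methods above
# are typically used: subclass `SparseNodeVisitor`, override only the node
# types of interest, and drive the visitor with `Node.walkabout()`:
#
#     class ReferenceCounter(SparseNodeVisitor):
#
#         def __init__(self, document):
#             SparseNodeVisitor.__init__(self, document)
#             self.count = 0
#
#         def visit_reference(self, node):
#             self.count += 1
#
#     visitor = ReferenceCounter(document)
#     document.walkabout(visitor)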

class TreeCopyVisitor(GenericNodeVisitor):

    """
    Make a complete copy of a tree or branch, including element attributes.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        self.parent_stack = []
        self.parent = []

    def get_tree_copy(self):
        return self.parent[0]

    def default_visit(self, node):
        """Copy the current node, and make it the new acting parent."""
        newnode = node.copy()
        self.parent.append(newnode)
        self.parent_stack.append(self.parent)
        self.parent = newnode

    def default_departure(self, node):
        """Restore the previous acting parent."""
        self.parent = self.parent_stack.pop()


class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass


class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node.  The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass


class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node.  The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass


class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass


class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method.  The current node's
    children and siblings are not affected.
    """

    pass


class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found.  This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass
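
# An illustrative sketch (commented out; `CommentSkipper` is hypothetical) of
# raising a pruning exception from within a traversal:
#
#     class CommentSkipper(SparseNodeVisitor):
#
#         def visit_comment(self, node):
#             # Neither the comment's children nor depart_comment() will
#             # be processed.
#             raise SkipNode
#
# `NodeFound`, by contrast, propagates out of walk()/walkabout() and must be
# caught by the code that started the traversal.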


def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers will conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``.  For CSS compatibility, identifiers (the "class"
    and "id" attributes) should have no underscores, colons, or periods.
    Hyphens may be used.

    - The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:

          ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
          followed by any number of letters, digits ([0-9]), hyphens ("-"),
          underscores ("_"), colons (":"), and periods (".").

    - However, the `CSS1 spec`_ defines identifiers based on the "name" token,
      a tighter interpretation ("flex" tokenizer notation; "latin1" and
      "escape" 8-bit characters have been replaced with entities)::

          unicode     \\[0-9a-f]{1,4}
          latin1      [&iexcl;-&yuml;]
          escape      {unicode}|\\[ -~&iexcl;-&yuml;]
          nmchar      [-a-z0-9]|{latin1}|{escape}
          name        {nmchar}+

    The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
    or periods ("."), therefore "class" and "id" attributes should not contain
    these characters. They should be replaced with hyphens ("-"). Combined
    with HTML's requirements (the first character must be a letter; no
    "unicode", "latin1", or "escape" characters), this results in the
    ``[a-z](-?[a-z0-9]+)*`` pattern.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    id = _non_id_chars.sub('-', ' '.join(string.lower().split()))
    id = _non_id_at_ends.sub('', id)
    return str(id)

_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
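
# Illustrative input/output for make_id() (example strings chosen here, not
# taken from the docutils test suite):
#
#     >>> make_id('A  Section_Title: 1.2')
#     'a-section-title-1-2'
#     >>> make_id('2nd try')
#     'nd-try'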

def dupname(node):
    node['dupname'] = node['name']
    del node['name']

def fully_normalize_name(name):
    """Return a case- and whitespace-normalized name."""
    return ' '.join(name.lower().split())

def whitespace_normalize_name(name):
    """Return a whitespace-normalized name."""
    return ' '.join(name.split())


=== Added File Zope/lib/python/third_party/docutils/docutils/statemachine.py ===
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
This module defines a finite state machine specialized for
regular-expression-based text filters.  It provides the following classes:

- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, a whitespace-sensitive version of `SearchStateMachine`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.

Exception classes:

- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.

Functions:

- `string2lines()`: split a multi-line string into a list of one-line strings


How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)

1. Import it: ``import statemachine`` or ``from statemachine import ...``.
   You will also need to ``import re``.

2. Derive a subclass of `State` (or `StateWS`) for each state in your state
   machine::

       class MyState(statemachine.State):

   Within the state's class definition:

   a) Include a pattern for each transition, in `State.patterns`::

          patterns = {'atransition': r'pattern', ...}

   b) Include a list of initial transitions to be set up automatically, in
      `State.initial_transitions`::

          initial_transitions = ['atransition', ...]

   c) Define a method for each transition, with the same name as the
      transition pattern::

          def atransition(self, match, context, next_state):
              # do something
              result = [...]  # a list
              return context, next_state, result
              # context, next_state may be altered

      Transition methods may raise an `EOFError` to cut processing short.

   d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
      transition methods, which handle the beginning- and end-of-file.

   e) In order to handle nested processing, you may wish to override the
      attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.

      If you are using `StateWS` as a base class, in order to handle nested
      indented blocks, you may wish to:

      - override the attributes `StateWS.indent_sm`,
        `StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
        `StateWS.known_indent_sm_kwargs`;
      - override the `StateWS.blank()` method; and/or
      - override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
        and/or `StateWS.firstknown_indent()` methods.

3. Create a state machine object::

       sm = StateMachine(state_classes=[MyState, ...],
                         initial_state='MyState')

4. Obtain the input text, which needs to be converted into a tab-free list of
   one-line strings. For example, to read text from a file called
   'inputfile'::

       input_string = open('inputfile').read()
       input_lines = statemachine.string2lines(input_string)

5. Run the state machine on the input text and collect the results, a list::

       results = sm.run(input_lines)

6. Remove any lingering circular references::

       sm.unlink()
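
A commented sketch tying these steps together follows this docstring.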
"""

__docformat__ = 'restructuredtext'

import sys
import re
from types import SliceType as _SliceType


class StateMachine:

    """
    A finite state machine for text filters using regular expressions.

    The input is provided in the form of a list of one-line strings (no
    newlines). States are subclasses of the `State` class. Transitions consist
    of regular expression patterns and transition methods, and are defined in
    each state.

    The state machine is started with the `run()` method, which returns the
    results of processing in a list.
    """

    def __init__(self, state_classes, initial_state, debug=0):
        """
        Initialize a `StateMachine` object; add state objects.

        Parameters:

        - `state_classes`: a list of `State` (sub)classes.
        - `initial_state`: a string, the class name of the initial state.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """

        self.input_lines = None
        """`StringList` of input lines (without newlines).
        Filled by `self.run()`."""

        self.input_offset = 0
        """Offset of `self.input_lines` from the beginning of the file."""

        self.line = None
        """Current input line."""

        self.line_offset = -1
        """Current input line offset from beginning of `self.input_lines`."""

        self.debug = debug
        """Debugging mode on/off."""

        self.initial_state = initial_state
        """The name of the initial state (key to `self.states`)."""

        self.current_state = initial_state
        """The name of the current state (key to `self.states`)."""

        self.states = {}
        """Mapping of {state_name: State_object}."""

        self.add_states(state_classes)

        self.observers = []
        """List of bound methods or functions to call whenever the current
        line changes.  Observers are called with one argument, ``self``.
        Cleared at the end of `run()`."""

    def unlink(self):
        """Remove circular references to objects no longer required."""
        for state in self.states.values():
            state.unlink()
        self.states = None

    def run(self, input_lines, input_offset=0, context=None,
            input_source=None):
        """
        Run the state machine on `input_lines`. Return results (a list).

        Reset `self.line_offset` and `self.current_state`. Run the
        beginning-of-file transition. Input one line at a time and check for a
        matching transition. If a match is found, call the transition method
        and possibly change the state. Store the context returned by the
        transition method to be passed on to the next transition matched.
        Accumulate the results returned by the transition methods in a list.
        Run the end-of-file transition. Finally, return the accumulated
        results.

        Parameters:

        - `input_lines`: a list of strings without newlines, or `StringList`.
        - `input_offset`: the line offset of `input_lines` from the beginning
          of the file.
        - `context`: application-specific storage.
        - `input_source`: name or path of source of `input_lines`.
        """
        self.runtime_init()
        if isinstance(input_lines, StringList):
            self.input_lines = input_lines
        else:
            self.input_lines = StringList(input_lines, source=input_source)
        self.input_offset = input_offset
        self.line_offset = -1
        self.current_state = self.initial_state
        if self.debug:
            print >>sys.stderr, (
                '\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
                % (self.line_offset, '\n| '.join(self.input_lines)))
        transitions = None
        results = []
        state = self.get_state()
        try:
            if self.debug:
                print >>sys.stderr, ('\nStateMachine.run: bof transition')
            context, result = state.bof(context)
            results.extend(result)
            while 1:
                try:
                    try:
                        self.next_line()
                        if self.debug:
                            source, offset = self.input_lines.info(
                                self.line_offset)
                            print >>sys.stderr, (
                                '\nStateMachine.run: line (source=%r, '
                                'offset=%r):\n| %s'
                                % (source, offset, self.line))
                        context, next_state, result = self.check_line(
                            context, state, transitions)
                    except EOFError:
                        if self.debug:
                            print >>sys.stderr, (
                                '\nStateMachine.run: %s.eof transition'
                                % state.__class__.__name__)
                        result = state.eof(context)
                        results.extend(result)
                        break
                    else:
                        results.extend(result)
                except TransitionCorrection, exception:
                    self.previous_line() # back up for another try
                    transitions = (exception.args[0],)
                    if self.debug:
                        print >>sys.stderr, (
                              '\nStateMachine.run: TransitionCorrection to '
                              'state "%s", transition %s.'
                              % (state.__class__.__name__, transitions[0]))
                    continue
                except StateCorrection, exception:
                    self.previous_line() # back up for another try
                    next_state = exception.args[0]
                    if len(exception.args) == 1:
                        transitions = None
                    else:
                        transitions = (exception.args[1],)
                    if self.debug:
                        print >>sys.stderr, (
                              '\nStateMachine.run: StateCorrection to state '
                              '"%s", transition %s.'
                              % (next_state, transitions[0]))
                else:
                    transitions = None
                state = self.get_state(next_state)
        except:
            if self.debug:
                self.error()
            raise
        self.observers = []
        return results

    def get_state(self, next_state=None):
        """
        Return current state object; set it first if `next_state` given.

        Parameter `next_state`: a string, the name of the next state.

        Exception: `UnknownStateError` raised if `next_state` unknown.
        """
        if next_state:
            if self.debug and next_state != self.current_state:
                print >>sys.stderr, \
                      ('\nStateMachine.get_state: Changing state from '
                       '"%s" to "%s" (input line %s).'
                       % (self.current_state, next_state,
                          self.abs_line_number()))
            self.current_state = next_state
        try:
            return self.states[self.current_state]
        except KeyError:
            raise UnknownStateError(self.current_state)

    def next_line(self, n=1):
        """Load `self.line` with the `n`'th next line and return it."""
        try:
            try:
                self.line_offset += n
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            self.notify_observers()

    def is_next_line_blank(self):
        """Return 1 if the next line is blank or non-existant."""
        try:
            return not self.input_lines[self.line_offset + 1].strip()
        except IndexError:
            return 1

    def at_eof(self):
        """Return 1 if the input is at or past end-of-file."""
        return self.line_offset >= len(self.input_lines) - 1

    def at_bof(self):
        """Return 1 if the input is at or before beginning-of-file."""
        return self.line_offset <= 0

    def previous_line(self, n=1):
        """Load `self.line` with the `n`'th previous line and return it."""
        self.line_offset -= n
        if self.line_offset < 0:
            self.line = None
        else:
            self.line = self.input_lines[self.line_offset]
        self.notify_observers()
        return self.line

    def goto_line(self, line_offset):
        """Jump to absolute line offset `line_offset`, load and return it."""
        try:
            try:
                self.line_offset = line_offset - self.input_offset
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            self.notify_observers()

    def get_source(self, line_offset):
        """Return source of line at absolute line offset `line_offset`."""
        return self.input_lines.source(line_offset - self.input_offset)

    def abs_line_offset(self):
        """Return line offset of current line, from beginning of file."""
        return self.line_offset + self.input_offset

    def abs_line_number(self):
        """Return line number of current line (counting from 1)."""
        return self.line_offset + self.input_offset + 1

    def insert_input(self, input_lines, source):
        """
        Insert `input_lines` after the current line, framed by blank
        "internal padding" lines, for subsequent processing.
        """
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding')
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding')
        self.input_lines.insert(self.line_offset + 2,
                                StringList(input_lines, source))

    def get_text_block(self, flush_left=0):
        """
        Return a contiguous block of text.

        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        try:
            block = self.input_lines.get_text_block(self.line_offset,
                                                    flush_left)
            self.next_line(len(block) - 1)
            return block
        except UnexpectedIndentationError, error:
            block, source, lineno = error
            self.next_line(len(block) - 1) # advance to last line of block
            raise

    def check_line(self, context, state, transitions=None):
        """
        Examine one line of input for a transition match & execute its method.

        Parameters:

        - `context`: application-dependent storage.
        - `state`: a `State` object, the current state.
        - `transitions`: an optional ordered list of transition names to try,
          instead of ``state.transition_order``.

        Return the values returned by the transition method:

        - context: possibly modified from the parameter `context`;
        - next state name (`State` subclass name);
        - the result output of the transition, a list.

        When there is no match, ``state.no_match()`` is called and its return
        value is returned.
        """
        if transitions is None:
            transitions = state.transition_order
        state_correction = None
        if self.debug:
            print >>sys.stderr, (
                  '\nStateMachine.check_line: state="%s", transitions=%r.'
                  % (state.__class__.__name__, transitions))
        for name in transitions:
            pattern, method, next_state = state.transitions[name]
            match = self.match(pattern)
            if match:
                if self.debug:
                    print >>sys.stderr, (
                          '\nStateMachine.check_line: Matched transition '
                          '"%s" in state "%s".'
                          % (name, state.__class__.__name__))
                return method(match, context, next_state)
        else:
            if self.debug:
                print >>sys.stderr, (
                      '\nStateMachine.check_line: No match in state "%s".'
                      % state.__class__.__name__)
            return state.no_match(context, transitions)

    def match(self, pattern):
        """
        Return the result of a regular expression match.

        Parameter `pattern`: an `re` compiled regular expression.
        """
        return pattern.match(self.line)

    def add_state(self, state_class):
        """
        Initialize & add a `state_class` (`State` subclass) object.

        Exception: `DuplicateStateError` raised if `state_class` was already
        added.
        """
        statename = state_class.__name__
        if self.states.has_key(statename):
            raise DuplicateStateError(statename)
        self.states[statename] = state_class(self, self.debug)

    def add_states(self, state_classes):
        """
        Add `state_classes` (a list of `State` subclasses).
        """
        for state_class in state_classes:
            self.add_state(state_class)

    def runtime_init(self):
        """
        Initialize `self.states`.
        """
        for state in self.states.values():
            state.runtime_init()

    def error(self):
        """Report error details."""
        type, value, module, line, function = _exception_data()
        print >>sys.stderr, '%s: %s' % (type, value)
        print >>sys.stderr, 'input line %s' % (self.abs_line_number())
        print >>sys.stderr, ('module %s, line %s, function %s'
                             % (module, line, function))

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes two
        arguments, the source and offset of the current line.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        self.observers.remove(observer)

    def notify_observers(self):
        for observer in self.observers:
            try:
                info = self.input_lines.info(self.line_offset)
            except IndexError:
                info = (None, None)
            observer(*info)


class State:

    """
    State superclass. Contains a list of transitions, and transition methods.

    Transition methods all have the same signature. They take 3 parameters:

    - An `re` match object. ``match.string`` contains the matched input line,
      ``match.start()`` gives the start index of the match, and
      ``match.end()`` gives the end index.
    - A context object, whose meaning is application-defined (initial value
      ``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
      method unchanged.
    - The name of the next state, a string, taken from the transitions list;
      normally it is returned unchanged, but it may be altered by the
      transition method if necessary.

    Transition methods all return a 3-tuple:

    - A context object, as (potentially) modified by the transition method.
    - The next state name (a return value of ``None`` means no state change).
    - The processing result, a list, which is accumulated by the state
      machine.

    Transition methods may raise an `EOFError` to cut processing short.

    There are two implicit transitions, and corresponding transition methods
    are defined: `bof()` handles the beginning-of-file, and `eof()` handles
    the end-of-file. These methods have non-standard signatures and return
    values. `bof()` returns the initial context and results, and may be used
    to return a header string, or do any other processing needed. `eof()`
    should handle any remaining context and wrap things up; it returns the
    final processing result.

    Typical applications need only subclass `State` (or a subclass), set the
    `patterns` and `initial_transitions` class attributes, and provide
    corresponding transition methods. The default object initialization will
    take care of constructing the list of transitions.
    """

    patterns = None
    """
    {Name: pattern} mapping, used by `make_transition()`. Each pattern may
    be a string or a compiled `re` pattern. Override in subclasses.
    """

    initial_transitions = None
    """
    A list of transitions to initialize when a `State` is instantiated.
    Each entry is either a transition name string, or a (transition name, next
    state name) pair. See `make_transitions()`. Override in subclasses.
    """

    nested_sm = None
    """
    The `StateMachine` class for handling nested processing.

    If left as ``None``, `nested_sm` defaults to the class of the state's
    controlling state machine. Override it in subclasses to avoid the default.
    """

    nested_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `nested_sm` constructor.

    Two keys must have entries in the dictionary:

    - Key 'state_classes' must be set to a list of `State` classes.
    - Key 'initial_state' must be set to the name of the initial state class.

    If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
    class of the current state, and 'initial_state' defaults to the name of
    the class of the current state. Override in subclasses to avoid the
    defaults.
    """

    def __init__(self, state_machine, debug=0):
        """
        Initialize a `State` object; make & add initial transitions.

        Parameters:

        - `state_machine`: the controlling `StateMachine` object.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """

        self.transition_order = []
        """A list of transition names in search order."""

        self.transitions = {}
        """
        A mapping of transition names to 3-tuples containing
        (compiled_pattern, transition_method, next_state_name). Initialized as
        an instance attribute dynamically (instead of as a class attribute)
        because it may make forward references to patterns and methods in this
        or other classes.
        """

        self.add_initial_transitions()

        self.state_machine = state_machine
        """A reference to the controlling `StateMachine` object."""

        self.debug = debug
        """Debugging mode on/off."""

        if self.nested_sm is None:
            self.nested_sm = self.state_machine.__class__
        if self.nested_sm_kwargs is None:
            self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                     'initial_state': self.__class__.__name__}

    def runtime_init(self):
        """
        Initialize this `State` before running the state machine; called from
        `self.state_machine.run()`.
        """
        pass

    def unlink(self):
        """Remove circular references to objects no longer required."""
        self.state_machine = None

    def add_initial_transitions(self):
        """Make and add transitions listed in `self.initial_transitions`."""
        if self.initial_transitions:
            names, transitions = self.make_transitions(
                  self.initial_transitions)
            self.add_transitions(names, transitions)

    def add_transitions(self, names, transitions):
        """
        Add a list of transitions to the start of the transition list.

        Parameters:

        - `names`: a list of transition names.
        - `transitions`: a mapping of names to transition tuples.

        Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
        """
        for name in names:
            if self.transitions.has_key(name):
                raise DuplicateTransitionError(name)
            if not transitions.has_key(name):
                raise UnknownTransitionError(name)
        self.transition_order[:0] = names
        self.transitions.update(transitions)

    def add_transition(self, name, transition):
        """
        Add a transition to the start of the transition list.

        Parameter `transition`: a ready-made transition 3-tuple.

        Exception: `DuplicateTransitionError`.
        """
        if self.transitions.has_key(name):
            raise DuplicateTransitionError(name)
        self.transition_order[:0] = [name]
        self.transitions[name] = transition

    def remove_transition(self, name):
        """
        Remove a transition by `name`.

        Exception: `UnknownTransitionError`.
        """
        try:
            del self.transitions[name]
            self.transition_order.remove(name)
        except:
            raise UnknownTransitionError(name)

    def make_transition(self, name, next_state=None):
        """
        Make & return a transition tuple based on `name`.

        This is a convenience function to simplify transition creation.

        Parameters:

        - `name`: a string, the name of the transition pattern & method. This
          `State` object must have a method called '`name`', and a dictionary
          `self.patterns` containing a key '`name`'.
        - `next_state`: a string, the name of the next `State` object for this
          transition. A value of ``None`` (or absent) implies no state change
          (i.e., continue with the same state).

        Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
        """
        if next_state is None:
            next_state = self.__class__.__name__
        try:
            pattern = self.patterns[name]
            if not hasattr(pattern, 'match'):
                pattern = re.compile(pattern)
        except KeyError:
            raise TransitionPatternNotFound(
                  '%s.patterns[%r]' % (self.__class__.__name__, name))
        try:
            method = getattr(self, name)
        except AttributeError:
            raise TransitionMethodNotFound(
                  '%s.%s' % (self.__class__.__name__, name))
        return (pattern, method, next_state)

    def make_transitions(self, name_list):
        """
        Return a list of transition names and a transition mapping.

        Parameter `name_list`: a list, where each entry is either a transition
        name string, or a 1- or 2-tuple (transition name, optional next state
        name).
        """
        stringtype = type('')
        names = []
        transitions = {}
        for namestate in name_list:
            if type(namestate) is stringtype:
                transitions[namestate] = self.make_transition(namestate)
                names.append(namestate)
            else:
                transitions[namestate[0]] = self.make_transition(*namestate)
                names.append(namestate[0])
        return names, transitions

    def no_match(self, context, transitions):
        """
        Called when there is no match from `StateMachine.check_line()`.

        Return the same values returned by transition methods:

        - context: unchanged;
        - next state name: ``None``;
        - empty result list.

        Override in subclasses to catch this event.
        """
        return context, None, []

    def bof(self, context):
        """
        Handle beginning-of-file. Return unchanged `context`, empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return context, []

    def eof(self, context):
        """
        Handle end-of-file. Return empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return []

    def nop(self, match, context, next_state):
        """
        A "do nothing" transition method.

        Return unchanged `context` & `next_state`, empty result. Useful for
        simple state changes (actionless transitions).
        """
        return context, next_state, []


class StateMachineWS(StateMachine):

    """
    `StateMachine` subclass specialized for whitespace recognition.

    There are three methods provided for extracting indented text blocks:
    
    - `get_indented()`: use when the indent is unknown.
    - `get_known_indented()`: use when the indent is known for all lines.
    - `get_first_known_indented()`: use when only the first line's indent is
      known.
    """

    def get_indented(self, until_blank=0, strip_indent=1):
        """
        Return a block of indented lines of text, and info.

        Extract an indented block where the indent is unknown for all lines.

        :Parameters:
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip common leading indent if true (1,
              default).

        :Return:
            - the indented block (a list of lines of text),
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent)
        if indented:
            self.next_line(len(indented) - 1) # advance to last indented line
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, indent, offset, blank_finish

    def get_known_indented(self, indent, until_blank=0, strip_indent=1):
        """
        Return an indented block and info.

        Extract an indented block where the indent is known for all lines.
        Starting with the current line, extract the entire text block with at
        least `indent` indentation (which must be whitespace, except for the
        first line).

        :Parameters:
            - `indent`: The number of indent columns/characters.
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).

        :Return:
            - the indented block,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              block_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, offset, blank_finish

    def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
                                 strip_top=1):
        """
        Return an indented block and info.

        Extract an indented block where the indent is known for the first line
        and unknown for all other lines.

        :Parameters:
            - `indent`: The first line's indent (# of columns/characters).
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).
            - `strip_top`: Strip blank lines from the beginning of the block.

        :Return:
            - the indented block,
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              first_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        if strip_top:
            while indented and not indented[0].strip():
                indented.trim_start()
                offset += 1
        return indented, indent, offset, blank_finish


class StateWS(State):

    """
    State superclass specialized for whitespace (blank lines & indents).

    Use this class with `StateMachineWS`.  The transitions 'blank' (for blank
    lines) and 'indent' (for indented text blocks) are added automatically,
    before any other transitions.  The transition method `blank()` handles
    blank lines and `indent()` handles nested indented blocks.  Indented
    blocks trigger a new state machine to be created by `indent()` and run.
    The class of the state machine to be created is in `indent_sm`, and the
    constructor keyword arguments are in the dictionary `indent_sm_kwargs`.

    The methods `known_indent()` and `first_known_indent()` are provided for
    indented blocks where the indent (all lines' and first line's only,
    respectively) is known to the transition method, along with the attributes
    `known_indent_sm` and `known_indent_sm_kwargs`.  Neither transition method
    is triggered automatically.
    """

    indent_sm = None
    """
    The `StateMachine` class handling indented text blocks.

    If left as ``None``, `indent_sm` defaults to the value of
    `State.nested_sm`.  Override it in subclasses to avoid the default.
    """

    indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `indent_sm` constructor.

    If left as ``None``, `indent_sm_kwargs` defaults to the value of
    `State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
    """

    known_indent_sm = None
    """
    The `StateMachine` class handling known-indented text blocks.

    If left as ``None``, `known_indent_sm` defaults to the value of
    `indent_sm`.  Override it in subclasses to avoid the default.
    """

    known_indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `known_indent_sm` constructor.

    If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
    `indent_sm_kwargs`. Override it in subclasses to avoid the default.
    """

    ws_patterns = {'blank': ' *$',
                   'indent': ' +'}
    """Patterns for default whitespace transitions.  May be overridden in
    subclasses."""

    ws_initial_transitions = ('blank', 'indent')
    """Default initial whitespace transitions, added before those listed in
    `State.initial_transitions`.  May be overridden in subclasses."""

    def __init__(self, state_machine, debug=0):
        """
        Initialize a `StateWS` object; extends `State.__init__()`.

        Check for indent state machine attributes, set defaults if not set.
        """
        State.__init__(self, state_machine, debug)
        if self.indent_sm is None:
            self.indent_sm = self.nested_sm
        if self.indent_sm_kwargs is None:
            self.indent_sm_kwargs = self.nested_sm_kwargs
        if self.known_indent_sm is None:
            self.known_indent_sm = self.indent_sm
        if self.known_indent_sm_kwargs is None:
            self.known_indent_sm_kwargs = self.indent_sm_kwargs

    def add_initial_transitions(self):
        """
        Add whitespace-specific transitions before those defined in subclass.

        Extends `State.add_initial_transitions()`.
        """
        State.add_initial_transitions(self)
        if self.patterns is None:
            self.patterns = {}
        self.patterns.update(self.ws_patterns)
        names, transitions = self.make_transitions(
            self.ws_initial_transitions)
        self.add_transitions(names, transitions)

    def blank(self, match, context, next_state):
        """Handle blank lines. Does nothing. Override in subclasses."""
        return self.nop(match, context, next_state)

    def indent(self, match, context, next_state):
        """
        Handle an indented text block. Extend or override in subclasses.

        Recursively run the registered state machine for indented blocks
        (`self.indent_sm`).
        """
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results

    def known_indent(self, match, context, next_state):
        """
        Handle a known-indent text block. Extend or override in subclasses.

        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                 **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results

    def first_known_indent(self, match, context, next_state):
        """
        Handle an indented text block (first line's indent known).

        Extend or override in subclasses.

        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                 **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results


class _SearchOverride:

    """
    Mix-in class to override `StateMachine` regular expression behavior.

    Changes regular expression matching, from the default `re.match()`
    (succeeds only if the pattern matches at the start of `self.line`) to
    `re.search()` (succeeds if the pattern matches anywhere in `self.line`).
    When subclassing a `StateMachine`, list this class **first** in the
    inheritance list of the class definition.
    """

    def match(self, pattern):
        """
        Return the result of a regular expression search.

        Overrides `StateMachine.match()`.

        Parameter `pattern`: `re` compiled regular expression.
        """
        return pattern.search(self.line)


class SearchStateMachine(_SearchOverride, StateMachine):
    """`StateMachine` which uses `re.search()` instead of `re.match()`."""
    pass


class SearchStateMachineWS(_SearchOverride, StateMachineWS):
    """`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
    pass


class ViewList:

    """
    List with extended functionality: slices of ViewList objects are child
    lists, linked to their parents. Changes made to a child list also affect
    the parent list.  A child list is effectively a "view" (in the SQL sense)
    of the parent list.  Changes to parent lists, however, do *not* affect
    active child lists.  If a parent list is changed, any active child lists
    should be recreated.

    The start and end of the slice can be trimmed using the `trim_start()` and
    `trim_end()` methods, without affecting the parent list.  The link between
    child and parent lists can be broken by calling `disconnect()` on the
    child list.

    Also, ViewList objects keep track of the source & offset of each item. 
    This information is accessible via the `source()`, `offset()`, and
    `info()` methods.
    """

    def __init__(self, initlist=None, source=None, items=None,
                 parent=None, parent_offset=None):
        self.data = []
        """The actual list of data, flattened from various sources."""

        self.items = []
        """A list of (source, offset) pairs, same length as `self.data`: the
        source of each line and the offset of each line from the beginning of
        its source."""

        self.parent = parent
        """The parent list."""

        self.parent_offset = parent_offset
        """Offset of this list from the beginning of the parent list."""

        if isinstance(initlist, ViewList):
            self.data = initlist.data[:]
            self.items = initlist.items[:]
        elif initlist is not None:
            self.data = list(initlist)
            if items:
                self.items = items
            else:
                self.items = [(source, i) for i in range(len(initlist))]
        assert len(self.data) == len(self.items), 'data mismatch'

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return '%s(%s, items=%s)' % (self.__class__.__name__,
                                     self.data, self.items)

    def __lt__(self, other): return self.data <  self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data >  self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cmp__(self, other): return cmp(self.data, self.__cast(other))

    def __cast(self, other):
        if isinstance(other, ViewList):
            return other.data
        else:
            return other

    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)

    # The __getitem__()/__setitem__() methods check whether the index
    # is a slice first, since native list objects start supporting
    # them directly in Python 2.3 (no exception is raised when
    # indexing a list with a slice object; they just work).

    def __getitem__(self, i):
        if isinstance(i, _SliceType):
            assert i.step in (None, 1), 'cannot handle slice with stride'
            return self.__class__(self.data[i.start:i.stop],
                                  items=self.items[i.start:i.stop],
                                  parent=self, parent_offset=i.start)
        else:
            return self.data[i]

    def __setitem__(self, i, item):
        if isinstance(i, _SliceType):
            assert i.step in (None, 1), 'cannot handle slice with stride'
            if not isinstance(item, ViewList):
                raise TypeError('assigning non-ViewList to ViewList slice')
            self.data[i.start:i.stop] = item.data
            self.items[i.start:i.stop] = item.items
            assert len(self.data) == len(self.items), 'data mismatch'
            if self.parent:
                self.parent[i.start + self.parent_offset
                            : i.stop + self.parent_offset] = item
        else:
            self.data[i] = item
            if self.parent:
                self.parent[i + self.parent_offset] = item

    def __delitem__(self, i):
        try:
            del self.data[i]
            del self.items[i]
            if self.parent:
                del self.parent[i + self.parent_offset]
        except TypeError:
            assert i.step is None, 'cannot handle slice with stride'
            del self.data[i.start:i.stop]
            del self.items[i.start:i.stop]
            if self.parent:
                del self.parent[i.start + self.parent_offset
                                : i.stop + self.parent_offset]

    def __add__(self, other):
        if isinstance(other, ViewList):
            return self.__class__(self.data + other.data,
                                  items=(self.items + other.items))
        else:
            raise TypeError('adding non-ViewList to a ViewList')

    def __radd__(self, other):
        if isinstance(other, ViewList):
            return self.__class__(other.data + self.data,
                                  items=(other.items + self.items))
        else:
            raise TypeError('adding ViewList to a non-ViewList')

    def __iadd__(self, other):
        if isinstance(other, ViewList):
            self.data += other.data
        else:
            raise TypeError('argument to += must be a ViewList')
        return self

    def __mul__(self, n):
        return self.__class__(self.data * n, items=(self.items * n))

    __rmul__ = __mul__

    def __imul__(self, n):
        self.data *= n
        self.items *= n
        return self

    def extend(self, other):
        if not isinstance(other, ViewList):
            raise TypeError('extending a ViewList with a non-ViewList')
        if self.parent:
            self.parent.insert(len(self.data) + self.parent_offset, other)
        self.data.extend(other.data)
        self.items.extend(other.items)

    def append(self, item, source=None, offset=0):
        if source is None:
            self.extend(item)
        else:
            if self.parent:
                self.parent.insert(len(self.data) + self.parent_offset, item,
                                   source, offset)
            self.data.append(item)
            self.items.append((source, offset))

    def insert(self, i, item, source=None, offset=0):
        if source is None:
            if not isinstance(item, ViewList):
                raise TypeError('inserting non-ViewList with no source given')
            self.data[i:i] = item.data
            self.items[i:i] = item.items
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item)
        else:
            self.data.insert(i, item)
            self.items.insert(i, (source, offset))
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item,
                                   source, offset)

    def pop(self, i=-1):
        if self.parent:
            index = (len(self.data) + i) % len(self.data)
            self.parent.pop(index + self.parent_offset)
        self.items.pop(i)
        return self.data.pop(i)

    def trim_start(self, n=1):
        """
        Remove items from the start of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[:n]
        del self.items[:n]
        if self.parent:
            self.parent_offset += n

    def trim_end(self, n=1):
        """
        Remove items from the end of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[-n:]
        del self.items[-n:]

    def remove(self, item):
        index = self.index(item)
        del self[index]

    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)

    def reverse(self):
        self.data.reverse()
        self.items.reverse()
        self.parent = None

    def sort(self, *args):
        tmp = zip(self.data, self.items)
        tmp.sort(*args)
        self.data = [entry[0] for entry in tmp]
        self.items = [entry[1] for entry in tmp]
        self.parent = None

    def info(self, i):
        """Return source & offset for index `i`."""
        try:
            return self.items[i]
        except IndexError:
            if i == len(self.data):     # Just past the end
                return self.items[i - 1][0], None
            else:
                raise

    def source(self, i):
        """Return source for index `i`."""
        return self.info(i)[0]

    def offset(self, i):
        """Return offset for index `i`."""
        return self.info(i)[1]

    def disconnect(self):
        """Break link between this list and parent list."""
        self.parent = None
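
# An illustrative sketch (values invented here) of the parent/child linkage
# described in the ViewList docstring: a slice is a live view, so writing
# through the child also updates the parent, while trim_start()/trim_end()
# and disconnect() affect only the child.
#
#     parent = ViewList(['a', 'b', 'c'], source='demo')
#     child = parent[1:3]      # linked child list, parent_offset == 1
#     child[0] = 'B'           # updates child.data[0] *and* parent.data[1]
#     child.trim_start()       # shrinks the child; the parent is untouched
#     child.disconnect()       # break the link explicitly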


class StringList(ViewList):

    """A `ViewList` with string-specific methods."""

    def trim_left(self, length, start=0, end=sys.maxint):
        """
        Trim `length` characters off the beginning of each item, in-place,
        from index `start` to `end`.  No whitespace-checking is done on the
        trimmed text.  Does not affect slice parent.
        """
        self.data[start:end] = [line[length:]
                                for line in self.data[start:end]]

    def get_text_block(self, start, flush_left=0):
        """
        Return a contiguous block of text.

        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        end = start
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if not line.strip():
                break
            if flush_left and (line[0] == ' '):
                source, offset = self.info(end)
                raise UnexpectedIndentationError(self[start:end], source,
                                                 offset + 1)
            end += 1
        return self[start:end]

    def get_indented(self, start=0, until_blank=0, strip_indent=1,
                     block_indent=None, first_indent=None):
        """
        Extract and return a StringList of indented lines of text.

        Collect all lines with indentation, determine the minimum indentation,
        remove the minimum indentation from all indented lines (unless
        `strip_indent` is false), and return them. All lines up to but not
        including the first unindented line will be returned.

        :Parameters:
          - `start`: The index of the first line to examine.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
          - `block_indent`: The indent of the entire block, if known.
          - `first_indent`: The indent of the first line, if known.

        :Return:
          - a StringList of indented lines with minimum indent removed;
          - the amount of the indent;
          - a boolean: did the indented block finish with a blank line or EOF?
        """
        indent = block_indent           # start with None if unknown
        end = start
        if block_indent is not None and first_indent is None:
            first_indent = block_indent
        if first_indent is not None:
            end += 1
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if line and (line[0] != ' '
                         or (block_indent is not None
                             and line[:block_indent].strip())):
                # Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
                blank_finish = ((end > start)
                                and not self.data[end - 1].strip())
                break
            stripped = line.lstrip()
            if not stripped:            # blank line
                if until_blank:
                    blank_finish = 1
                    break
            elif block_indent is None:
                line_indent = len(line) - len(stripped)
                if indent is None:
                    indent = line_indent
                else:
                    indent = min(indent, line_indent)
            end += 1
        else:
            blank_finish = 1            # block ends at end of lines
        block = self[start:end]
        if first_indent is not None and block:
            block.data[0] = block.data[0][first_indent:]
        if indent and strip_indent:
            block.trim_left(indent, start=(first_indent is not None))
        return block, indent or 0, blank_finish
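
    # Editor's sketch of the three return values (constructor usage assumed
    # from earlier in this file):
    #
    #     >>> sl = StringList(['  one', '  two', '', 'three'], source='demo')
    #     >>> block, indent, blank_finish = sl.get_indented()
    #     >>> block.data, indent, blank_finish
    #     (['one', 'two', ''], 2, 1)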

    def get_2D_block(self, top, left, bottom, right, strip_indent=1):
        """
        Return a `StringList` of the rectangle of text bounded by rows
        `top`..`bottom` and columns `left`..`right`.  Each line is
        right-stripped; if `strip_indent` is true, leading whitespace
        common to the non-blank lines of the rectangle is removed as well.
        """
        block = self[top:bottom]
        indent = right
        for i in range(len(block.data)):
            block.data[i] = line = block.data[i][left:right].rstrip()
            if line:
                indent = min(indent, len(line) - len(line.lstrip()))
        if strip_indent and 0 < indent < right:
            block.data = [line[indent:] for line in block.data]
        return block


class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass


class TransitionCorrection(Exception):

    """
    Raise from within a transition method to switch to another transition.

    Raise with one argument, the new transition name.
    """


class StateCorrection(Exception):

    """
    Raise from within a transition method to switch to another state.

    Raise with one or two arguments: new state name, and an optional new
    transition name.
    """


def string2lines(astring, tab_width=8, convert_whitespace=0,
                 whitespace=re.compile('[\v\f]')):
    """
    Return a list of one-line strings with tabs expanded and no newlines.

    Each tab is expanded with between 1 and `tab_width` spaces, so that the
    next character's index becomes a multiple of `tab_width` (8 by default).

    Parameters:

    - `astring`: a multi-line string.
    - `tab_width`: the number of columns between tab stops.
    - `convert_whitespace`: convert form feeds and vertical tabs to spaces?
    """
    if convert_whitespace:
        astring = whitespace.sub(' ', astring)
    return [s.expandtabs(tab_width) for s in astring.splitlines()]
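
# Editor's sketch:
#
#     >>> string2lines('one\n\ttwo\n', tab_width=4)
#     ['one', '    two']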

def _exception_data():
    """
    Return exception information:

    - the exception's class name;
    - the exception object;
    - the name of the file containing the offending code;
    - the line number of the offending code;
    - the function name of the offending code.
    """
    type, value, traceback = sys.exc_info()
    while traceback.tb_next:
        traceback = traceback.tb_next
    code = traceback.tb_frame.f_code
    return (type.__name__, value, code.co_filename, traceback.tb_lineno,
            code.co_name)
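
# Editor's sketch (the traceback walk above reports the *innermost* frame):
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         name, value, filename, lineno, function = _exception_data()
#     # name == 'ZeroDivisionError'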


=== Added File Zope/lib/python/third_party/docutils/docutils/urischemes.py ===
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
http://www.iana.org/assignments/uri-schemes (revised 2003-11-26)
and an older list at http://www.w3.org/Addressing/schemes.html.
"""

# Many values are blank and should be filled in with useful descriptions.

schemes = {
      'about': 'provides information on Navigator',
      'acap': 'Application Configuration Access Protocol',
      'addbook': "To add vCard entries to Communicator's Address Book",
      'afp': 'Apple Filing Protocol',
      'afs': 'Andrew File System global file names',
      'aim': 'AOL Instant Messenger',
      'callto': 'for NetMeeting links',
      'castanet': 'Castanet Tuner URLs for Netcaster',
      'chttp': 'cached HTTP supported by RealPlayer',
      'cid': 'content identifier',
      'data': ('allows inclusion of small data items as "immediate" data; '
               'RFC 2397'),
      'dav': 'Distributed Authoring and Versioning Protocol; RFC 2518',
      'dns': 'Domain Name System resources',
      'eid': ('External ID; non-URL data; general escape mechanism to allow '
              'access to information for applications that are too '
              'specialized to justify their own schemes'),
      'fax': ('a connection to a terminal that can handle telefaxes '
              '(facsimiles); RFC 2806'),
      'feed': 'NetNewsWire feed',
      'file': 'Host-specific file names',
      'finger': '',
      'freenet': '',
      'ftp': 'File Transfer Protocol',
      'go': 'go; RFC3368',
      'gopher': 'The Gopher Protocol',
      'gsm-sms': ('Global System for Mobile Communications Short Message '
                  'Service'),
      'h323': 'video (audiovisual) communication on local area networks',
      'h324': ('video and audio communications over low bitrate connections '
               'such as POTS modem connections'),
      'hdl': 'CNRI handle system',
      'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
      'http': 'Hypertext Transfer Protocol',
      'https': 'HTTP over SSL',
      'hydra': 'SubEthaEdit URI.  See http://www.codingmonkeys.de/subethaedit.',
      'iioploc': 'Internet Inter-ORB Protocol Location?',
      'ilu': 'Inter-Language Unification',
      'im': 'Instant Messaging',
      'imap': 'Internet Message Access Protocol',
      'ior': 'CORBA interoperable object reference',
      'ipp': 'Internet Printing Protocol',
      'irc': 'Internet Relay Chat',
      'iseek': 'See www.ambrosiasw.com; a little util for OS X.',
      'jar': 'Java archive',
      'javascript': ('JavaScript code; evaluates the expression after the '
                     'colon'),
      'jdbc': 'JDBC connection URI.',
      'ldap': 'Lightweight Directory Access Protocol',
      'lifn': '',
      'livescript': '',
      'lrq': '',
      'mailbox': 'Mail folder access',
      'mailserver': 'Access to data available from mail servers',
      'mailto': 'Electronic mail address',
      'md5': '',
      'mid': 'message identifier',
      'mocha': '',
      'modem': ('a connection to a terminal that can handle incoming data '
                'calls; RFC 2806'),
      'mupdate': 'Mailbox Update (MUPDATE) Protocol',
      'news': 'USENET news',
      'nfs': 'Network File System protocol',
      'nntp': 'USENET news using NNTP access',
      'opaquelocktoken': '',
      'phone': '',
      'pop': 'Post Office Protocol',
      'pop3': 'Post Office Protocol v3',
      'pres': 'Presence',
      'printer': '',
      'prospero': 'Prospero Directory Service',
      'rdar': 'URLs found in Darwin source (http://www.opensource.apple.com/darwinsource/).',
      'res': '',
      'rtsp': 'real time streaming protocol',
      'rvp': '',
      'rwhois': '',
      'rx': 'Remote Execution',
      'sdp': '',
      'service': 'service location',
      'shttp': 'secure hypertext transfer protocol',
      'sip': 'Session Initiation Protocol',
      'sips': 'secure Session Initiation Protocol',
      'smb': 'SAMBA filesystems.',
      'snews': 'For NNTP postings via SSL',
      'soap.beep': '',
      'soap.beeps': '',
      'ssh': 'Reference to interactive sessions via ssh.',
      't120': 'real time data conferencing (audiographics)',
      'tcp': '',
      'tel': ('a connection to a terminal that handles normal voice '
              'telephone calls, a voice mailbox or another voice messaging '
              'system or a service that can be operated using DTMF tones; '
              'RFC 2806.'),
      'telephone': 'telephone',
      'telnet': 'Reference to interactive sessions',
      'tftp': 'Trivial File Transfer Protocol',
      'tip': 'Transaction Internet Protocol',
      'tn3270': 'Interactive 3270 emulation sessions',
      'tv': '',
      'urn': 'Uniform Resource Name',
      'uuid': '',
      'vemmi': 'versatile multimedia interface',
      'videotex': '',
      'view-source': 'displays HTML code that was generated with JavaScript',
      'wais': 'Wide Area Information Servers',
      'whodp': '',
      'whois++': 'Distributed directory service.',
      'x-man-page': 'Opens man page in Terminal.app on OS X (see macosxhints.com)',
      'xmlrpc.beep': '',
      'xmlrpc.beeps': '',
      'z39.50r': 'Z39.50 Retrieval',
      'z39.50s': 'Z39.50 Session',}
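
# Editor's sketch of the intended lookups (Python 2 idiom, matching the rest
# of this package):
#
#     >>> from docutils import urischemes
#     >>> urischemes.schemes['ftp']
#     'File Transfer Protocol'
#     >>> urischemes.schemes.has_key('mailto')
#     True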


=== Added File Zope/lib/python/third_party/docutils/docutils/utils.py ===
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
# Revision: $Revision: 1.1.4.1 $
# Date: $Date: 2004/10/29 19:08:20 $
# Copyright: This module has been placed in the public domain.

"""
Miscellaneous utilities for the documentation utilities.
"""

__docformat__ = 'reStructuredText'

import sys
import os
import os.path
from types import StringType, UnicodeType
from docutils import ApplicationError, DataError
from docutils import frontend, nodes


class SystemMessage(ApplicationError):

    def __init__(self, system_message, level):
        Exception.__init__(self, system_message.astext())
        self.level = level


class SystemMessagePropagation(ApplicationError): pass


class Reporter:

    """
    Info/warning/error reporter and ``system_message`` element generator.

    Five levels of system messages are defined, along with corresponding
    methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.

    There is typically one Reporter object per process.  A Reporter object is
    instantiated with thresholds for reporting (generating warnings) and
    halting processing (raising exceptions), a switch to turn debug output on
    or off, and an I/O stream for warnings.  These are stored in the default
    reporting category, '' (zero-length string).

    Multiple reporting categories [#]_ may be set, each with its own reporting
    and halting thresholds, debugging switch, and warning stream
    (collectively a `ConditionSet`).  Categories are hierarchical dotted-name
    strings that look like attribute references: 'spam', 'spam.eggs',
    'neeeow.wum.ping'.  The 'spam' category is the ancestor of
    'spam.bacon.eggs'.  Unset categories inherit stored conditions from their
    closest ancestor category that has been set.

    When a system message is generated, the stored conditions from its
    category (or ancestor if unset) are retrieved.  The system message level
    is compared to the thresholds stored in the category, and a warning or
    error is generated as appropriate.  Debug messages are produced iff the
    stored debug switch is on.  Message output is sent to the stored warning
    stream if not set to ''.

    The default category is '' (empty string).  By convention, Writers should
    retrieve reporting conditions from the 'writer' category (which, unless
    explicitly set, defaults to the conditions of the default category).

    The Reporter class also employs a modified form of the "Observer" pattern
    [GoF95]_ to track system messages generated.  The `attach_observer` method
    should be called before parsing, with a bound method or function which
    accepts system messages.  The observer can be removed with
    `detach_observer`, and another added in its place.

    .. [#] The concept of "categories" was inspired by the log4j project:
       http://jakarta.apache.org/log4j/.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
    """List of names for system message levels, indexed by level."""

    def __init__(self, source, report_level, halt_level, stream=None,
                 debug=0, encoding='ascii', error_handler='replace'):
        """
        Initialize the `ConditionSet` for the `Reporter`'s default category.

        :Parameters:

            - `source`: The path to or description of the source data.
            - `report_level`: The level at or above which warning output will
              be sent to `stream`.
            - `halt_level`: The level at or above which `SystemMessage`
              exceptions will be raised, halting execution.
            - `stream`: Where warning output is sent.  Can be file-like (has a
              ``.write`` method), a string (file name, opened for writing),
              '' (empty string, for discarding all stream messages) or
              `None` (implies `sys.stderr`; default).
            - `debug`: Show debug (level=0) system messages?
            - `encoding`: The encoding for stderr output.
            - `error_handler`: The error handler for stderr output encoding.
        """
        self.source = source
        """The path to or description of the source data."""
        
        if stream is None:
            stream = sys.stderr
        elif type(stream) in (StringType, UnicodeType):
            # Leave stream untouched if it's ''.
            if stream != '':
                if type(stream) == StringType:
                    stream = open(stream, 'w')
                elif type(stream) == UnicodeType:
                    stream = open(stream.encode(), 'w')

        self.encoding = encoding
        """The character encoding for the stderr output."""

        self.error_handler = error_handler
        """The character encoding error handler."""

        self.categories = {'': ConditionSet(debug, report_level, halt_level,
                                            stream)}
        """Mapping of category names to conditions. Default category is ''."""

        self.observers = []
        """List of bound methods or functions to call with each system_message
        created."""

        self.max_level = -1
        """The highest level system message generated so far."""

    def set_conditions(self, category, report_level, halt_level,
                       stream=None, debug=0):
        if stream is None:
            stream = sys.stderr
        self.categories[category] = ConditionSet(debug, report_level,
                                                 halt_level, stream)

    def unset_conditions(self, category):
        if category and self.categories.has_key(category):
            del self.categories[category]

    __delitem__ = unset_conditions

    def get_conditions(self, category):
        while not self.categories.has_key(category):
            category = category[:category.rfind('.') + 1][:-1]
        return self.categories[category]

    __getitem__ = get_conditions

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes one
        argument, a `nodes.system_message` instance.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        self.observers.remove(observer)

    def notify_observers(self, message):
        for observer in self.observers:
            observer(message)

    def system_message(self, level, message, *children, **kwargs):
        """
        Return a system_message object.

        Raise an exception or generate a warning if appropriate.
        """
        attributes = kwargs.copy()
        category = kwargs.get('category', '')
        if kwargs.has_key('category'):
            del attributes['category']
        if kwargs.has_key('base_node'):
            source, line = get_source_line(kwargs['base_node'])
            del attributes['base_node']
            if source is not None:
                attributes.setdefault('source', source)
            if line is not None:
                attributes.setdefault('line', line)
        attributes.setdefault('source', self.source)
        msg = nodes.system_message(message, level=level,
                                   type=self.levels[level],
                                   *children, **attributes)
        debug, report_level, halt_level, stream = self[category].astuple()
        if (level >= report_level or debug and level == 0) and stream:
            msgtext = msg.astext().encode(self.encoding, self.error_handler)
            if category:
                print >>stream, msgtext, '[%s]' % category
            else:
                print >>stream, msgtext
        if level >= halt_level:
            raise SystemMessage(msg, level)
        if level > 0 or debug:
            self.notify_observers(msg)
        self.max_level = max(level, self.max_level)
        return msg

    def debug(self, *args, **kwargs):
        """
        Level-0, "DEBUG": an internal reporting issue. Typically, there is no
        effect on the processing. Level-0 system messages are handled
        separately from the others.
        """
        return self.system_message(0, *args, **kwargs)

    def info(self, *args, **kwargs):
        """
        Level-1, "INFO": a minor issue that can be ignored. Typically there is
        no effect on processing, and level-1 system messages are not reported.
        """
        return self.system_message(1, *args, **kwargs)

    def warning(self, *args, **kwargs):
        """
        Level-2, "WARNING": an issue that should be addressed. If ignored,
        there may be unpredictable problems with the output.
        """
        return self.system_message(2, *args, **kwargs)

    def error(self, *args, **kwargs):
        """
        Level-3, "ERROR": an error that should be addressed. If ignored, the
        output will contain errors.
        """
        return self.system_message(3, *args, **kwargs)

    def severe(self, *args, **kwargs):
        """
        Level-4, "SEVERE": a severe error that must be addressed. If ignored,
        the output will contain severe errors. Typically level-4 system
        messages are turned into exceptions which halt processing.
        """
        return self.system_message(4, *args, **kwargs)
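
# Editor's note -- a minimal usage sketch for the class above; the source
# name and thresholds are arbitrary, and output goes to sys.stderr unless a
# `stream` is supplied:
#
#     reporter = Reporter('<sketch>', report_level=2, halt_level=4)
#     collected = []
#     reporter.attach_observer(collected.append)
#     msg = reporter.warning('something to look at')  # printed to stderr and
#                                                     # returned as a
#                                                     # system_message node
#     # A level-4 ("severe") message would meet halt_level and raise
#     # SystemMessage instead of returning.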


class ConditionSet:

    """
    A set of two thresholds (`report_level` & `halt_level`), a switch
    (`debug`), and an I/O stream (`stream`), corresponding to one `Reporter`
    category.
    """

    def __init__(self, debug, report_level, halt_level, stream):
        self.debug = debug
        self.report_level = report_level
        self.halt_level = halt_level
        self.stream = stream

    def astuple(self):
        return (self.debug, self.report_level, self.halt_level,
                self.stream)


class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass


def extract_extension_options(field_list, options_spec):
    """
    Return a dictionary mapping extension option names to converted values.

    :Parameters:
        - `field_list`: A flat field list without field arguments, where each
          field body consists of a single paragraph only.
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `ValueError` for invalid option values (raised by the conversion
           function).
        - `TypeError` for invalid option value types (raised by conversion
           function).
        - `DuplicateOptionError` for duplicate options.
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    option_list = extract_options(field_list)
    option_dict = assemble_option_dict(option_list, options_spec)
    return option_dict
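
# Editor's note -- a hedged sketch of the expected input, built with the
# usual docutils.nodes constructors (the option name 'scale' and its `int`
# converter are arbitrary examples, not part of this module):
#
#     field = nodes.field('', nodes.field_name('', 'scale'),
#                         nodes.field_body('', nodes.paragraph('', '50')))
#     extract_extension_options([field], {'scale': int})
#     # -> {'scale': 50}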

def extract_options(field_list):
    """
    Return a list of option (name, value) pairs from field names & bodies.

    :Parameter:
        `field_list`: A flat field list, where each field name is a single
        word and each field body consists of a single paragraph only.

    :Exceptions:
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    option_list = []
    for field in field_list:
        if len(field[0].astext().split()) != 1:
            raise BadOptionError(
                'extension option field name may not contain multiple words')
        name = str(field[0].astext().lower())
        body = field[1]
        if len(body) == 0:
            data = None
        elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
              or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
            raise BadOptionDataError(
                  'extension option field body may contain\n'
                  'a single paragraph only (option "%s")' % name)
        else:
            data = body[0][0].astext()
        option_list.append((name, data))
    return option_list

def assemble_option_dict(option_list, options_spec):
    """
    Return a mapping of option names to values.

    :Parameters:
        - `option_list`: A list of (name, value) pairs (the output of
          `extract_options()`).
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `DuplicateOptionError` for duplicate options.
        - `ValueError` for invalid option values (raised by conversion
           function).
        - `TypeError` for invalid option value types (raised by conversion
           function).
    """
    options = {}
    for name, value in option_list:
        convertor = options_spec[name]  # raises KeyError if unknown
        if convertor is None:
            raise KeyError(name)        # or if explicitly disabled
        if options.has_key(name):
            raise DuplicateOptionError('duplicate option "%s"' % name)
        try:
            options[name] = convertor(value)
        except (ValueError, TypeError), detail:
            raise detail.__class__('(option: "%s"; value: %r)\n%s'
                                   % (name, value, detail))
    return options
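
# Editor's sketch:
#
#     >>> assemble_option_dict([('width', '4')], {'width': int})
#     {'width': 4}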


class NameValueError(DataError): pass


def extract_name_value(line):
    """
    Return a list of (name, value) from a line of the form "name=value ...".

    :Exception:
        `NameValueError` for invalid input (missing name, missing data, bad
        quotes, etc.).
    """
    attlist = []
    while line:
        equals = line.find('=')
        if equals == -1:
            raise NameValueError('missing "="')
        attname = line[:equals].strip()
        if equals == 0 or not attname:
            raise NameValueError(
                  'missing attribute name before "="')
        line = line[equals+1:].lstrip()
        if not line:
            raise NameValueError(
                  'missing value after "%s="' % attname)
        if line[0] in '\'"':
            endquote = line.find(line[0], 1)
            if endquote == -1:
                raise NameValueError(
                      'attribute "%s" missing end quote (%s)'
                      % (attname, line[0]))
            if len(line) > endquote + 1 and line[endquote + 1].strip():
                raise NameValueError(
                      'attribute "%s" end quote (%s) not followed by '
                      'whitespace' % (attname, line[0]))
            data = line[1:endquote]
            line = line[endquote+1:].lstrip()
        else:
            space = line.find(' ')
            if space == -1:
                data = line
                line = ''
            else:
                data = line[:space]
                line = line[space+1:].lstrip()
        attlist.append((attname.lower(), data))
    return attlist
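
# Editor's sketch:
#
#     >>> extract_name_value('width=4 height="5 em"')
#     [('width', '4'), ('height', '5 em')]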

def new_document(source, settings=None):
    """
    Return a new empty document object.

    :Parameters:
        `source` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings.  If none provided, a default set will be used.
    """
    if settings is None:
        settings = frontend.OptionParser().get_default_values()
    reporter = Reporter(source, settings.report_level, settings.halt_level,
                        stream=settings.warning_stream, debug=settings.debug,
                        encoding=settings.error_encoding,
                        error_handler=settings.error_encoding_error_handler)
    document = nodes.document(settings, reporter, source=source)
    document.note_source(source, -1)
    return document
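
# Editor's note -- a minimal sketch; with no `settings` argument the defaults
# from frontend.OptionParser are used, so this needs nothing beyond the
# docutils package itself:
#
#     document = new_document('<sketch>')
#     document.reporter.info('empty doctree created')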

def clean_rcs_keywords(paragraph, keyword_substitutions):
    if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
        textnode = paragraph[0]
        for pattern, substitution in keyword_substitutions:
            match = pattern.search(textnode.data)
            if match:
                textnode.data = pattern.sub(substitution, textnode.data)
                return

def relative_path(source, target):
    """
    Build and return a path to `target`, relative to `source` (both files).

    If there is no common prefix, return the absolute path to `target`.
    """
    source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
    target_parts = os.path.abspath(target).split(os.sep)
    # Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
    if source_parts[:2] != target_parts[:2]:
        # Nothing in common between paths.
        # Return absolute path, using '/' for URLs:
        return '/'.join(target_parts)
    source_parts.reverse()
    target_parts.reverse()
    while (source_parts and target_parts
           and source_parts[-1] == target_parts[-1]):
        # Remove path components in common:
        source_parts.pop()
        target_parts.pop()
    target_parts.reverse()
    parts = ['..'] * (len(source_parts) - 1) + target_parts
    return '/'.join(parts)
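
# Editor's sketch (both paths are resolved relative to the same working
# directory before comparison):
#
#     >>> relative_path('spam/eggs/doc.txt', 'spam/images/fig.png')
#     '../images/fig.png'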

def get_source_line(node):
    """
    Return the "source" and "line" attributes from the `node` given or from
    its closest ancestor.
    """
    while node:
        if node.source or node.line:
            return node.source, node.line
        node = node.parent
    return None, None


