[Checkins] SVN: zope.testing/trunk/src/zope/testing/testrunner Merge the colorized-output branch with

Marius Gedminas marius at pov.lt
Sat Jul 14 18:03:36 EDT 2007


Log message for revision 77982:
  Merge the colorized-output branch with
  
    svn merge -r 77866:77980 svn+ssh://svn.zope.org/repos/main/zope.testing/branches/colorized-output .
  
  Adds a new test runner option -c that produces ANSI color sequences to
  highlight test failures, diffs and other interesting pieces of information.
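  
  For example (an illustrative invocation; see the doctests added in
  testrunner-colors.txt below — the import path and run() signature are
  as in this commit):
  
      >>> import sys
      >>> from zope.testing import testrunner
      >>> sys.argv = 'test -c'.split()   # -c is equivalent to --color
      >>> failed = testrunner.run()      # returns True if any test failed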
  
  

Changed:
  A   zope.testing/trunk/src/zope/testing/testrunner-colors.txt
  U   zope.testing/trunk/src/zope/testing/testrunner-errors.txt
  U   zope.testing/trunk/src/zope/testing/testrunner.py

-=-
Copied: zope.testing/trunk/src/zope/testing/testrunner-colors.txt (from rev 77980, zope.testing/branches/colorized-output/src/zope/testing/testrunner-colors.txt)
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner-colors.txt	                        (rev 0)
+++ zope.testing/trunk/src/zope/testing/testrunner-colors.txt	2007-07-14 22:03:35 UTC (rev 77982)
@@ -0,0 +1,227 @@
+Colorful output
+===============
+
+If you're on a Unix-like system, you can ask for colorized output.  The test
+runner emits terminal control sequences to highlight important pieces of
+information (such as the names of failing tests) in different colors.
+
+    >>> import os.path, sys
+    >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+    >>> defaults = [
+    ...     '--path', directory_with_tests,
+    ...     '--tests-pattern', '^sampletestsf?$',
+    ...     ]
+
+    >>> from zope.testing import testrunner
+
+Since it wouldn't be a good idea to have terminal control characters in a
+test file, let's wrap sys.stdout in a simple terminal interpreter:
+
+    >>> import re
+    >>> class Terminal(object):
+    ...     _color_regexp = re.compile('\033[[]([0-9;]*)m')
+    ...     _colors = {'0': 'normal', '1': 'bold', '30': 'black', '31': 'red',
+    ...                '32': 'green', '33': 'yellow', '34': 'blue',
+    ...                '35': 'magenta', '36': 'cyan', '37': 'grey'}
+    ...     def __init__(self, stream):
+    ...         self._stream = stream
+    ...     def __getattr__(self, attr):
+    ...         return getattr(self._stream, attr)
+    ...     def write(self, text):
+    ...         if '\033[' in text:
+    ...             text = self._color_regexp.sub(self._color, text)
+    ...         self._stream.write(text)
+    ...     def writelines(self, lines):
+    ...         for line in lines:
+    ...             self.write(line)
+    ...     def _color(self, match):
+    ...         colorstring = '{'
+    ...         for number in match.group(1).split(';'):
+    ...             colorstring += self._colors.get(number, '?')
+    ...         return colorstring + '}'
+
+    >>> real_stdout = sys.stdout
+    >>> sys.stdout = Terminal(sys.stdout)
+
+A successful test run soothes the developer with warm green colors:
+
+    >>> sys.argv = 'test --layer 122 -c'.split()
+    >>> testrunner.run(defaults)
+    {normal}Running samplelayers.Layer122 tests:{normal}
+      Set up samplelayers.Layer1 in 0.000 seconds.
+      Set up samplelayers.Layer12 in 0.000 seconds.
+      Set up samplelayers.Layer122 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.007{normal} seconds.{normal}
+    {normal}Tearing down left over layers:{normal}
+      Tear down samplelayers.Layer122 in 0.000 seconds.
+      Tear down samplelayers.Layer12 in 0.000 seconds.
+      Tear down samplelayers.Layer1 in 0.000 seconds.
+    False
+
+A failed test run highlights the failures in red:
+
+    >>> sys.argv = 'test -c --tests-pattern ^sampletests(f|_e|_f)?$ '.split()
+    >>> testrunner.run(defaults)
+    {normal}Running unit tests:{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Failure in test eek (sample2.sampletests_e){normal}
+    Failed doctest test for sample2.sampletests_e.eek
+      File "testrunner-ex/sample2/sampletests_e.py", line 28, in eek
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    {normal}File "{boldblue}testrunner-ex/sample2/sampletests_e.py{normal}", line {boldred}30{normal}, in {boldcyan}sample2.sampletests_e.eek{normal}
+    Failed example:
+    {cyan}    f(){normal}
+    Exception raised:
+    {red}    Traceback (most recent call last):{normal}
+    {red}      File ".../doctest.py", line 1356, in __run{normal}
+    {red}        compileflags, 1) in test.globs{normal}
+    {red}      File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?{normal}
+    {red}        f(){normal}
+    {red}      File "testrunner-ex/sample2/sampletests_e.py", line 19, in f{normal}
+    {red}        g(){normal}
+    {red}      File "testrunner-ex/sample2/sampletests_e.py", line 24, in g{normal}
+    {red}        x = y + 1{normal}
+    {red}    NameError: global name 'y' is not defined{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Error in test test3 (sample2.sampletests_e.Test){normal}
+    Traceback (most recent call last):
+    {normal}  File "{boldblue}unittest.py{normal}", line {boldred}260{normal}, in {boldcyan}run{normal}
+    {cyan}    testMethod(){normal}
+    {normal}  File "{boldblue}testrunner-ex/sample2/sampletests_e.py{normal}", line {boldred}43{normal}, in {boldcyan}test3{normal}
+    {cyan}    f(){normal}
+    {normal}  File "{boldblue}testrunner-ex/sample2/sampletests_e.py{normal}", line {boldred}19{normal}, in {boldcyan}f{normal}
+    {cyan}    g(){normal}
+    {normal}  File "{boldblue}testrunner-ex/sample2/sampletests_e.py{normal}", line {boldred}24{normal}, in {boldcyan}g{normal}
+    {cyan}    x = y + 1{normal}
+    {red}NameError: global name 'y' is not defined{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Failure in test testrunner-ex/sample2/e.txt{normal}
+    Failed doctest test for e.txt
+      File "testrunner-ex/sample2/e.txt", line 0
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    {normal}File "{boldblue}testrunner-ex/sample2/e.txt{normal}", line {boldred}4{normal}, in {boldcyan}e.txt{normal}
+    Failed example:
+    {cyan}    f(){normal}
+    Exception raised:
+    {red}    Traceback (most recent call last):{normal}
+    {red}      File ".../doctest.py", line 1356, in __run{normal}
+    {red}        compileflags, 1) in test.globs{normal}
+    {red}      File "<doctest e.txt[1]>", line 1, in ?{normal}
+    {red}        f(){normal}
+    {red}      File "<doctest e.txt[0]>", line 2, in f{normal}
+    {red}        return x{normal}
+    {red}    NameError: global name 'x' is not defined{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Failure in test test (sample2.sampletests_f.Test){normal}
+    Traceback (most recent call last):
+    {normal}  File "{boldblue}unittest.py{normal}", line {boldred}260{normal}, in {boldcyan}run{normal}
+    {cyan}    testMethod(){normal}
+    {normal}  File "{boldblue}testrunner-ex/sample2/sampletests_f.py{normal}", line {boldred}21{normal}, in {boldcyan}test{normal}
+    {cyan}    self.assertEqual(1,0){normal}
+    {normal}  File "{boldblue}unittest.py{normal}", line {boldred}333{normal}, in {boldcyan}failUnlessEqual{normal}
+    {cyan}    raise self.failureException, \{normal}
+    {red}AssertionError: 1 != 0{normal}
+    <BLANKLINE>
+    {normal}  Ran {green}200{normal} tests with {boldred}3{normal} failures and {boldred}1{normal} errors in {green}0.045{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer1 tests:{normal}
+      Set up samplelayers.Layer1 in 0.000 seconds.
+    {normal}  Ran {green}9{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.001{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer11 tests:{normal}
+      Set up samplelayers.Layer11 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.007{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer111 tests:{normal}
+      Set up samplelayers.Layerx in 0.000 seconds.
+      Set up samplelayers.Layer111 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.008{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer112 tests:{normal}
+      Tear down samplelayers.Layer111 in 0.000 seconds.
+      Set up samplelayers.Layer112 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.008{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer12 tests:{normal}
+      Tear down samplelayers.Layer112 in 0.000 seconds.
+      Tear down samplelayers.Layerx in 0.000 seconds.
+      Tear down samplelayers.Layer11 in 0.000 seconds.
+      Set up samplelayers.Layer12 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.007{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer121 tests:{normal}
+      Set up samplelayers.Layer121 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.007{normal} seconds.{normal}
+    {normal}Running samplelayers.Layer122 tests:{normal}
+      Tear down samplelayers.Layer121 in 0.000 seconds.
+      Set up samplelayers.Layer122 in 0.000 seconds.
+    {normal}  Ran {green}34{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.008{normal} seconds.{normal}
+    {normal}Tearing down left over layers:{normal}
+      Tear down samplelayers.Layer122 in 0.000 seconds.
+      Tear down samplelayers.Layer12 in 0.000 seconds.
+      Tear down samplelayers.Layer1 in 0.000 seconds.
+    {normal}Total: {green}413{normal} tests, {boldred}3{normal} failures, {boldred}1{normal} errors{normal}
+    True
+
+The expected and actual outputs of failed doctests are shown in different
+colors:
+
+    >>> sys.argv = 'test --tests-pattern ^pledge$ -c'.split()
+    >>> _ = testrunner.run(defaults)
+    {normal}Running unit tests:{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Failure in test pledge (pledge){normal}
+    Failed doctest test for pledge.pledge
+      File "testrunner-ex/pledge.py", line 24, in pledge
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    {normal}File "{boldblue}testrunner-ex/pledge.py{normal}", line {boldred}26{normal}, in {boldcyan}pledge.pledge{normal}
+    Failed example:
+    {cyan}    print pledge_template % ('and earthling', 'planet'),{normal}
+    Expected:
+    {green}    I give my pledge, as an earthling,{normal}
+    {green}    to save, and faithfully, to defend from waste,{normal}
+    {green}    the natural resources of my planet.{normal}
+    {green}    It's soils, minerals, forests, waters, and wildlife.{normal}
+    Got:
+    {red}    I give my pledge, as and earthling,{normal}
+    {red}    to save, and faithfully, to defend from waste,{normal}
+    {red}    the natural resources of my planet.{normal}
+    {red}    It's soils, minerals, forests, waters, and wildlife.{normal}
+    <BLANKLINE>
+    {normal}  Ran {green}1{normal} tests with {boldred}1{normal} failures and {green}0{normal} errors in {green}0.002{normal} seconds.{normal}
+
+Diffs are highlighted so you can easily tell the context and the mismatches
+apart:
+
+    >>> sys.argv = 'test --tests-pattern ^pledge$ --ndiff -c'.split()
+    >>> _ = testrunner.run(defaults)
+    {normal}Running unit tests:{normal}
+    <BLANKLINE>
+    <BLANKLINE>
+    {boldred}Failure in test pledge (pledge){normal}
+    Failed doctest test for pledge.pledge
+      File "testrunner-ex/pledge.py", line 24, in pledge
+    <BLANKLINE>
+    ----------------------------------------------------------------------
+    {normal}File "{boldblue}testrunner-ex/pledge.py{normal}", line {boldred}26{normal}, in {boldcyan}pledge.pledge{normal}
+    Failed example:
+    {cyan}    print pledge_template % ('and earthling', 'planet'),{normal}
+    Differences (ndiff with -expected +actual):
+    {green}    - I give my pledge, as an earthling,{normal}
+    {red}    + I give my pledge, as and earthling,{normal}
+    {magenta}    ?                        +{normal}
+    {normal}      to save, and faithfully, to defend from waste,{normal}
+    {normal}      the natural resources of my planet.{normal}
+    {normal}      It's soils, minerals, forests, waters, and wildlife.{normal}
+    <BLANKLINE>
+    {normal}  Ran {green}1{normal} tests with {boldred}1{normal} failures and {green}0{normal} errors in {green}0.003{normal} seconds.{normal}
+
+Clean up:
+
+    >>> sys.stdout = real_stdout
+
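
For reference, here is roughly what the Terminal wrapper defined above does
to a raw escape sequence (a minimal sketch, assuming the Terminal class and
sys.stdout binding from the doctest just shown are in scope):

    >>> term = Terminal(sys.stdout)
    >>> term.write('\033[1;31mFailure\033[0m\n')   # bold (1) + red (31), then reset (0)
    {boldred}Failure{normal}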

Modified: zope.testing/trunk/src/zope/testing/testrunner-errors.txt
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner-errors.txt	2007-07-14 21:59:41 UTC (rev 77981)
+++ zope.testing/trunk/src/zope/testing/testrunner-errors.txt	2007-07-14 22:03:35 UTC (rev 77982)
@@ -683,7 +683,7 @@
 
 If a doctest has large expected and actual output, it can be hard to
 see differences when expected and actual output differ.  The --ndiff,
---udiff, and --cdiff options can be used to get diff output pf various
+--udiff, and --cdiff options can be used to get diff output of various
 kinds.
 
     >>> sys.argv = 'test --tests-pattern ^pledge$'.split()

Modified: zope.testing/trunk/src/zope/testing/testrunner.py
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner.py	2007-07-14 21:59:41 UTC (rev 77981)
+++ zope.testing/trunk/src/zope/testing/testrunner.py	2007-07-14 22:03:35 UTC (rev 77982)
@@ -326,7 +326,7 @@
         """Report an error with a big ASCII banner."""
         print
         print '*'*70
-        print message
+        self.error(message)
         print '*'*70
         print
 
@@ -367,16 +367,16 @@
             for test in import_errors:
                 print "  " + test.module
 
+    def summary(self, n_tests, n_failures, n_errors, n_seconds):
+        """Summarize the results of a single test layer."""
+        print ("  Ran %s tests with %s failures and %s errors in %.3f seconds."
+               % (n_tests, n_failures, n_errors, n_seconds))
+
     def totals(self, n_tests, n_failures, n_errors):
-        """Report totals (number of tests, failures, and errors)."""
+        """Summarize the results of all layers."""
         print "Total: %s tests, %s failures, %s errors" % (
                         n_tests, n_failures, n_errors)
 
-    def summary(self, n_tests, n_failures, n_errors, n_seconds):
-        """Summarize the results."""
-        print ("  Ran %s tests with %s failures and %s errors in %.3f seconds."
-               % (n_tests, n_failures, n_errors, n_seconds))
-
     def list_of_tests(self, tests, layer_name):
         """Report a list of test names."""
         print "Listing %s tests:" % layer_name
@@ -527,7 +527,10 @@
         """Report an error with a traceback."""
         print
         print msg
+        print self.format_traceback(exc_info)
 
+    def format_traceback(self, exc_info):
+        """Format the traceback."""
         v = exc_info[1]
         if isinstance(v, doctest.DocTestFailureException):
             tb = v.args[0]
@@ -542,9 +545,8 @@
                 )
         else:
             tb = "".join(traceback.format_exception(*exc_info))
+        return tb
 
-        print tb
-
     def stop_test(self, test):
         """Clean up the output state after a test."""
         if self.progress:
@@ -561,6 +563,220 @@
             print
 
 
+class ColorfulOutputFormatter(OutputFormatter):
+    """Output formatter that uses ANSI color codes.
+
+    Like syntax highlighting in your text editor, colorizing
+    test failures helps the developer.
+    """
+
+    # These colors are carefully chosen to have enough contrast
+    # on terminals with both black and white backgrounds.
+    colorscheme = {'normal': 'normal',
+                   'default': 'default',
+                   'info': 'normal',
+                   'error': 'brightred',
+                   'number': 'green',
+                   'ok-number': 'green',
+                   'error-number': 'brightred',
+                   'filename': 'lightblue',
+                   'lineno': 'lightred',
+                   'testname': 'lightcyan',
+                   'failed-example': 'cyan',
+                   'expected-output': 'green',
+                   'actual-output': 'red',
+                   'character-diffs': 'magenta',
+                   'diff-chunk': 'magenta',
+                   'exception': 'red'}
+
+    # Map prefix character to color in diff output.  This handles ndiff and
+    # udiff correctly, but not cdiff.  In cdiff we ought to highlight '!' as
+    # expected-output until we see a '-', then highlight '!' as actual-output,
+    # until we see a '*', then switch back to highlighting '!' as
+    # expected-output.  Nevertheless, colorized cdiffs are reasonably readable,
+    # so I'm not going to fix this.
+    #   -- mgedmin
+    diff_color = {'-': 'expected-output',
+                  '+': 'actual-output',
+                  '?': 'character-diffs',
+                  '@': 'diff-chunk',
+                  '*': 'diff-chunk',
+                  '!': 'actual-output',}
+
+    prefixes = [('dark', '0;'),
+                ('light', '1;'),
+                ('bright', '1;'),
+                ('bold', '1;'),]
+
+    colorcodes = {'default': 0, 'normal': 0,
+                  'black': 30,
+                  'red': 31,
+                  'green': 32,
+                  'brown': 33, 'yellow': 33,
+                  'blue': 34,
+                  'magenta': 35,
+                  'cyan': 36,
+                  'grey': 37, 'gray': 37, 'white': 37}
+
+    def color_code(self, color):
+        """Convert a color description (e.g. 'lightgray') to a terminal code."""
+        prefix_code = ''
+        for prefix, code in self.prefixes:
+            if color.startswith(prefix):
+                color = color[len(prefix):]
+                prefix_code = code
+                break
+        color_code = self.colorcodes[color]
+        return '\033[%s%sm' % (prefix_code, color_code)
+
+    def color(self, what):
+        """Pick a named color from the color scheme"""
+        return self.color_code(self.colorscheme[what])
+
+    def colorize(self, what, message):
+        """Wrap message in color."""
+        return self.color(what) + message + self.color_code('normal')
+
+    def error_count_color(self, n):
+        """Choose a color for the number of errors."""
+        if n:
+            return self.color('error-number')
+        else:
+            return self.color('ok-number')
+
+    def info(self, message):
+        """Print an informative message."""
+        print self.colorize('info', message)
+
+    def error(self, message):
+        """Report an error."""
+        print self.colorize('error', message)
+
+    def error_with_banner(self, message):
+        """Report an error with a big ASCII banner."""
+        print
+        print self.colorize('error', '*'*70)
+        self.error(message)
+        print self.colorize('error', '*'*70)
+        print
+
+    def summary(self, n_tests, n_failures, n_errors, n_seconds):
+        """Summarize the results."""
+        sys.stdout.writelines([
+            self.color('info'), '  Ran ',
+            self.color('number'), str(n_tests),
+            self.color('info'), ' tests with ',
+            self.error_count_color(n_failures), str(n_failures),
+            self.color('info'), ' failures and ',
+            self.error_count_color(n_errors), str(n_errors),
+            self.color('info'), ' errors in ',
+            self.color('number'), '%.3f' % n_seconds,
+            self.color('info'), ' seconds.',
+            self.color('normal'), '\n'])
+
+    def totals(self, n_tests, n_failures, n_errors):
+        """Report totals (number of tests, failures, and errors)."""
+        sys.stdout.writelines([
+            self.color('info'), 'Total: ',
+            self.color('number'), str(n_tests),
+            self.color('info'), ' tests, ',
+            self.error_count_color(n_failures), str(n_failures),
+            self.color('info'), ' failures, ',
+            self.error_count_color(n_errors), str(n_errors),
+            self.color('info'), ' errors',
+            self.color('normal'), '\n'])
+
+    def print_traceback(self, msg, exc_info):
+        """Report an error with a traceback."""
+        print
+        print self.colorize('error', msg)
+        v = exc_info[1]
+        if isinstance(v, doctest.DocTestFailureException):
+            self.print_doctest_failure(v.args[0])
+        elif isinstance(v, doctest.DocTestFailure):
+            # I don't think these are ever used... -- mgedmin
+            tb = self.format_traceback(exc_info)
+            print tb
+        else:
+            tb = self.format_traceback(exc_info)
+            self.print_colorized_traceback(tb)
+
+    def print_doctest_failure(self, formatted_failure):
+        """Report a doctest failure.
+
+        ``formatted_failure`` is a string -- that's what
+        DocTestSuite/DocFileSuite gives us.
+        """
+        color_of_indented_text = 'normal'
+        colorize_diff = False
+        for line in formatted_failure.splitlines():
+            if line.startswith('File '):
+                m = re.match(r'File "(.*)", line (\d*), in (.*)$', line)
+                if m:
+                    filename, lineno, test = m.groups()
+                    sys.stdout.writelines([
+                        self.color('normal'), 'File "',
+                        self.color('filename'), filename,
+                        self.color('normal'), '", line ',
+                        self.color('lineno'), lineno,
+                        self.color('normal'), ', in ',
+                        self.color('testname'), test,
+                        self.color('normal'), '\n'])
+                else:
+                    print line
+            elif line.startswith('    '):
+                if colorize_diff and len(line) > 4:
+                    color = self.diff_color.get(line[4], color_of_indented_text)
+                    print self.colorize(color, line)
+                else:
+                    print self.colorize(color_of_indented_text, line)
+            else:
+                colorize_diff = False
+                if line.startswith('Failed example'):
+                    color_of_indented_text = 'failed-example'
+                elif line.startswith('Expected:'):
+                    color_of_indented_text = 'expected-output'
+                elif line.startswith('Got:'):
+                    color_of_indented_text = 'actual-output'
+                elif line.startswith('Exception raised:'):
+                    color_of_indented_text = 'exception'
+                elif line.startswith('Differences '):
+                    color_of_indented_text = 'normal'
+                    colorize_diff = True
+                else:
+                    color_of_indented_text = 'normal'
+                print line
+        print
+
+    def print_colorized_traceback(self, formatted_traceback):
+        """Report a test failure.
+
+        ``formatted_traceback`` is a string.
+        """
+        for line in formatted_traceback.splitlines():
+            if line.startswith('  File'):
+                m = re.match(r'  File "(.*)", line (\d*), in (.*)$', line)
+                if m:
+                    filename, lineno, test = m.groups()
+                    sys.stdout.writelines([
+                        self.color('normal'), '  File "',
+                        self.color('filename'), filename,
+                        self.color('normal'), '", line ',
+                        self.color('lineno'), lineno,
+                        self.color('normal'), ', in ',
+                        self.color('testname'), test,
+                        self.color('normal'), '\n'])
+                else:
+                    print line
+            elif line.startswith('    '):
+                print self.colorize('failed-example', line)
+            elif line.startswith('Traceback (most recent call last)'):
+                print line
+            else:
+                print self.colorize('exception', line)
+        print
+
+
 def run(defaults=None, args=None):
     if args is None:
         args = sys.argv
@@ -1745,6 +1961,12 @@
 """)
 
 reporting.add_option(
+    '--color', '-c', action="store_true", dest='color',
+    help="""\
+Colorize the output.
+""")
+
+reporting.add_option(
     '-1', '--hide-secondary-failures',
     action="store_true", dest='report_only_first_failure',
     help="""\
@@ -2031,7 +2253,10 @@
     merge_options(options, defaults)
     options.original_testrunner_args = original_testrunner_args
 
-    options.output = OutputFormatter(options)
+    if options.color:
+        options.output = ColorfulOutputFormatter(options)
+    else:
+        options.output = OutputFormatter(options)
 
     options.fail = False
 
@@ -2201,6 +2426,7 @@
         (re.compile(r'\r'), '\\\\r\n'),
         (re.compile(r'\d+[.]\d\d\d seconds'), 'N.NNN seconds'),
         (re.compile(r'\d+[.]\d\d\d s'), 'N.NNN s'),
+        (re.compile(r'\d+[.]\d\d\d{'), 'N.NNN{'),
         (re.compile('( |")[^\n]+testrunner-ex'), r'\1testrunner-ex'),
         (re.compile('( |")[^\n]+testrunner.py'), r'\1testrunner.py'),
         (re.compile(r'> [^\n]*(doc|unit)test[.]py\(\d+\)'),
@@ -2245,6 +2471,7 @@
         'testrunner-layers.txt',
         'testrunner-layers-api.txt',
         'testrunner-progress.txt',
+        'testrunner-colors.txt',
         'testrunner-simple.txt',
         'testrunner-test-selection.txt',
         'testrunner-verbose.txt',
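
For reference, colorize() above brackets a message between the scheme's
escape code and a reset to normal.  With the default colorscheme, 'error'
maps to 'brightred', i.e. the '1;' intensity prefix plus color number 31.
A standalone sketch of the resulting byte sequences (hypothetical helper
name, same codes as in the diff):

    >>> def colorize(code, message):
    ...     # code such as '1;31' (bright red); '0' is the reset/normal code
    ...     return '\033[%sm%s\033[0m' % (code, message)
    >>> colorize('1;31', 'Error in test test3')
    '\x1b[1;31mError in test test3\x1b[0m'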


