[Checkins] SVN: zope.testing/branches/regebro-python3/ backmerge from trunk
Wolfgang Schnerring
wosc at wosc.de
Sun Sep 13 04:21:58 EDT 2009
Log message for revision 103878:
backmerge from trunk
Changed:
U zope.testing/branches/regebro-python3/CHANGES.txt
U zope.testing/branches/regebro-python3/buildout.cfg
U zope.testing/branches/regebro-python3/setup.py
D zope.testing/branches/regebro-python3/src/zope/testing/DEPENDENCIES.cfg
U zope.testing/branches/regebro-python3/src/zope/testing/__init__.py
U zope.testing/branches/regebro-python3/src/zope/testing/doctest.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/__init__.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/coverage.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/filter.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/find.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/formatter.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/options.py
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/process.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/profiling.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/runner.py
D zope.testing/branches/regebro-python3/src/zope/testing/testrunner/subprocess.py
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tb_format.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-arguments.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-colors.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-coverage.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging-layer-setup.test
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging.txt
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-discovery.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-edge-cases.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-errors.txt
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover.py
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover_notests.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_1.py
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sampletests_buffering.py
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex-251759/
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-gc.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-knit.txt
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-buff.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-ntd.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-leaks-err.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling-cprofiler.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-progress.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-repeat.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-simple.txt
A zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-tb-format.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-test-selection.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-verbose.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-wo-source.txt
U zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tests.py
-=-
Modified: zope.testing/branches/regebro-python3/CHANGES.txt
===================================================================
--- zope.testing/branches/regebro-python3/CHANGES.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/CHANGES.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -1,12 +1,115 @@
zope.testing Changelog
**********************
+3.8.2 (unreleased)
+==================
+
+- Removing hotshot profiler when using Python 2.6. That makes zope.testing
+ compatible with Python 2.6.
+
+
+3.8.1 (2009-08-12)
+==================
+
+- Avoid hardcoding sys.argv[0] as script;
+ allow, for instance, Zope 2's `bin/instance test` (LP#407916).
+
+- Produce a clear error message when a subprocess doesn't follow the
+ zope.testing.testrunner protocol (LP#407916).
+
+- Do not unnecessarily squelch verbose output in a subprocess when there are
+ not multiple subprocesses.
+
+- Do not unnecessarily batch subprocess output, which can stymie automated and
+ human processes for identifying hung tests.
+
+- Include incremental output when there are multiple subprocesses and a
+ verbosity of -vv or greater is requested. This again is not batched,
+ supporting automated processes and humans looking for hung tests.
+
+
+3.8.0 (2009-07-24)
+==================
+
+- Testrunner automatically picks up descendants of unittest.TestCase in test
+ modules, so you don't have to provide a test_suite() anymore.
+
+
+3.7.7 (2009-07-15)
+==================
+
+- Clean up support for displaying tracebacks with supplements by turning it
+ into an always-enabled feature and making the dependency on zope.exceptions
+ explicit.
+
+- Fix #251759: Test runner descended into directories that aren't Python
+ packages.
+
+- Code cleanups.
+
+
+3.7.6 (2009-07-02)
+==================
+
+- Add zope-testrunner console_scripts entry point. This exposes a
+ zope-testrunner binary with default installs allowing the testrunner to be
+ run from the command line.
+
+3.7.5 (2009-06-08)
+==================
+
+- Fix bug when running subprocesses on Windows.
+
+- The option REPORT_ONLY_FIRST_FAILURE (command line option "-1") is now
+ respected even when a doctest declares its own REPORTING_FLAGS, such as
+ REPORT_NDIFF.
+
+- Fixed bug that broke readline with pdb when using doctest
+ (see http://bugs.python.org/issue5727).
+
+- Made tests pass on Windows and Linux at the same time.
+
+
+3.7.4 (2009-05-01)
+==================
+
+- Filenames of doctest examples now contain the line number and not
+ only the example number. So a stack trace in pdb tells the exact
+ line number of the current example. This fixes
+ https://bugs.launchpad.net/bugs/339813
+
+- Colorization of doctest output correctly handles blank lines.
+
+
+3.7.3 (2009-04-22)
+==================
+
+- Better deal with rogue threads by always exiting with status so even
+ spinning daemon threads won't block the runner from exiting. This deprecated
+ the ``--with-exit-status`` option.
+
+
+3.7.2 (2009-04-13)
+==================
+
+- fix test failure on Python 2.4 because of slight difference in the way
+ coverage is reported (__init__ files with only a single comment line are now
+ not reported)
+- fixed bug that caused the test runner to hang when running subprocesses (as a
+ result Python 2.3 is no longer supported).
+- there is apparently a bug in Python 2.6 (related to
+ http://bugs.python.org/issue1303673) that causes the profile tests to fail.
+- added explanatory notes to buildout.cfg about how to run the tests with
+ multiple versions of Python
+
+
3.7.1 (2008-10-17)
==================
- The setupstack temporary-directory support now properly handles
read-only files by making them writable before removing them.
+
3.7.0 (2008-09-22)
==================
Modified: zope.testing/branches/regebro-python3/buildout.cfg
===================================================================
--- zope.testing/branches/regebro-python3/buildout.cfg 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/buildout.cfg 2009-09-13 08:21:57 UTC (rev 103878)
@@ -6,17 +6,29 @@
recipe = zc.recipe.testrunner
eggs = zope.testing
-[test23]
-python = python23
-recipe = zc.recipe.testrunner
-eggs = zope.testing
+# The [test2X] sections below are to make testing with various Python versions
+# easier. You'll need entries in your default.cfg that point to the location
+# that your various versions of Python are installed. Like so:
+#
+# [python2.4]
+# executable = /usr/local/bin/python2.4
+#
+# And then run "bin/buildout install test24 test25 test26" to build the
+# version-specific test scripts. Once that's done you can run "bin/test24"
+# (etc.).
+
[test24]
-python = python24
+python = python2.4
recipe = zc.recipe.testrunner
eggs = zope.testing
[test25]
-python = python25
+python = python2.5
recipe = zc.recipe.testrunner
eggs = zope.testing
+
+[test26]
+python = python2.6
+recipe = zc.recipe.testrunner
+eggs = zope.testing
Modified: zope.testing/branches/regebro-python3/setup.py
===================================================================
--- zope.testing/branches/regebro-python3/setup.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/setup.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -11,6 +11,11 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
+# This package is developed by the Zope Toolkit project, documented here:
+# http://docs.zope.org/zopetoolkit
+# When developing and releasing this package, please follow the documented
+# Zope Toolkit policies as described by this documentation.
+##############################################################################
"""Setup for zope.testing package
$Id$
@@ -23,8 +28,9 @@
extra = dict(
namespace_packages=['zope',],
install_requires = ['setuptools',
+ 'zope.exceptions',
'zope.interface'],
- extras_require={'zope_tracebacks': 'zope.exceptions'},
+ entry_points = {'console_scripts': ['zope-testrunner = zope.testing.testrunner:run',]},
include_package_data = True,
zip_safe = False,
)
@@ -79,14 +85,29 @@
setup(
name='zope.testing',
- version='3.7.2dev',
+ version = '3.8.2dev',
url='http://pypi.python.org/pypi/zope.testing',
license='ZPL 2.1',
description='Zope testing framework, including the testrunner script.',
long_description=long_description,
author='Zope Corporation and Contributors',
- author_email='zope3-dev at zope.org',
+ author_email='zope-dev at zope.org',
packages=["zope", "zope.testing"],
package_dir = {'': 'src'},
+
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Console",
+ "Framework :: Zope3",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Zope Public License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python :: 2.4",
+ "Programming Language :: Python :: 2.5",
+ "Programming Language :: Python :: 2.6",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Testing",
+ ],
+
**extra)
Deleted: zope.testing/branches/regebro-python3/src/zope/testing/DEPENDENCIES.cfg
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/DEPENDENCIES.cfg 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/DEPENDENCIES.cfg 2009-09-13 08:21:57 UTC (rev 103878)
@@ -1 +0,0 @@
-zope.exceptions
Modified: zope.testing/branches/regebro-python3/src/zope/testing/__init__.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/__init__.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/__init__.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -11,20 +11,3 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
-"""Set up testing environment
-
-$Id$
-"""
-import os
-
-def patchTracebackModule():
- """Use the ExceptionFormatter to show more info in tracebacks.
- """
- from zope.exceptions.exceptionformatter import format_exception
- import traceback
- traceback.format_exception = format_exception
-
-# Don't use the new exception formatter by default, since it
-# doesn't show filenames.
-if os.environ.get('NEW_ZOPE_EXCEPTION_FORMATTER', 0):
- patchTracebackModule()
Modified: zope.testing/branches/regebro-python3/src/zope/testing/doctest.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/doctest.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/doctest.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -355,9 +355,11 @@
self.__out = out
self.__debugger_used = False
try:
- pdb.Pdb.__init__(self, stdin=sys.stdin, stdout=out)
+ pdb.Pdb.__init__(self, stdin=sys.stdin, stdout=out)
except TypeError:
- pdb.Pdb.__init__(self)
+ pdb.Pdb.__init__(self)
+ # enable readline
+ self.use_rawinput = 1
def set_trace(self):
self.__debugger_used = True
@@ -1345,7 +1347,10 @@
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
- filename = '<doctest %s[%d]>' % (test.name, examplenum)
+ # Line number counting starts with 0 so we add one to get
+ # the real line number.
+ filename = '<doctest %s[line %d, example %d]>' % (
+ test.name, example.lineno+1, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
@@ -1428,9 +1433,10 @@
self.failures += f
self.tries += t
- __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
- r'(?P<name>[\w\.]+)'
- r'\[(?P<examplenum>\d+)\]>$')
+ __LINECACHE_FILENAME_RE = re.compile(
+ r'<doctest (?P<name>[\w\.]+)\[line \d+, example (?P<examplenum>\d+)\]>$'
+ )
+
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
@@ -2305,6 +2311,9 @@
# so add the default reporting flags
optionflags |= _unittest_reportflags
+ if _unittest_reportflags & REPORT_ONLY_FIRST_FAILURE:
+ optionflags |= REPORT_ONLY_FIRST_FAILURE
+
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
def write(value):
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/__init__.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/__init__.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/__init__.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -23,16 +23,26 @@
import zope.testing.testrunner.interfaces
+def run(defaults=None, args=None, script_parts=None):
+ """Main runner function which can be and is being used from main programs.
-def run(defaults=None, args=None):
- # This function is here to make the whole test runner compatible before
- # the large refactoring.
+ Will execute the tests and exit the process according to the test result.
+
+ """
+ failed = run_internal(defaults, args, script_parts=script_parts)
+ sys.exit(int(failed))
+
+
+def run_internal(defaults=None, args=None, script_parts=None):
+ """Execute tests.
+
+ Returns whether errors or failures occurred during testing.
+
+ """
# XXX Bah. Lazy import to avoid circular/early import problems
from zope.testing.testrunner.runner import Runner
- runner = Runner(defaults, args)
+ runner = Runner(defaults, args, script_parts=script_parts)
runner.run()
- if runner.failed and runner.options.exitwithstatus:
- sys.exit(1)
return runner.failed
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/coverage.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/coverage.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/coverage.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -22,6 +22,7 @@
import threading
import zope.testing.testrunner.feature
+from zope.testing.testrunner.find import test_dirs
# For some reason, the doctest module resets the trace callable randomly, thus
@@ -134,7 +135,9 @@
def global_setup(self):
"""Executed once when the test runner is being set up."""
self.directory = os.path.join(os.getcwd(), self.runner.options.coverage)
- self.tracer = TestTrace(self.runner.test_directories,
+
+ # FIXME: This shouldn't rely on the find feature directly.
+ self.tracer = TestTrace(test_dirs(self.runner.options, {}),
trace=False, count=True)
self.tracer.start()
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/filter.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/filter.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/filter.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -65,7 +65,8 @@
# No pattern matched this name so we remove it
layers.pop(name)
- if self.runner.options.verbose:
+ if (self.runner.options.verbose and
+ not self.runner.options.resume_layer):
if self.runner.options.all:
msg = "Running tests at all levels"
else:
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/find.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/find.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/find.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -153,7 +153,15 @@
)
else:
try:
- suite = getattr(module, options.suite_name)()
+ if hasattr(module, options.suite_name):
+ suite = getattr(module, options.suite_name)()
+ else:
+ suite = unittest.defaultTestLoader.loadTestsFromModule(module)
+ if suite.countTestCases() == 0:
+ raise TypeError(
+ "Module %s does not define any tests"
+ % module_name)
+
if isinstance(suite, unittest.TestSuite):
check_suite(suite, module_name)
else:
@@ -204,7 +212,10 @@
for (p, package) in test_dirs(options, {}):
for dirname, dirs, files in walk_with_symlinks(options, p):
if dirname != p and not contains_init_py(options, files):
- continue # not a plausible test directory
+ # This is not a plausible test directory. Avoid descending
+ # further.
+ del dirs[:]
+ continue
root2ext = {}
dirs[:] = filter(identifier, dirs)
d = os.path.split(dirname)[1]
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/formatter.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/formatter.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/formatter.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -52,6 +52,9 @@
progress = property(lambda self: self.options.progress)
verbose = property(lambda self: self.options.verbose)
+ in_subprocess = property(
+ lambda self: self.options.resume_layer is not None and
+ self.options.processes > 1)
def compute_max_width(self):
"""Try to determine the terminal width."""
@@ -263,6 +266,12 @@
elif self.verbose == 1:
sys.stdout.write('.' * test.countTestCases())
+
+ elif self.in_subprocess:
+ sys.stdout.write('.' * test.countTestCases())
+ # Give the parent process a new line so it sees the progress
+ # in a timely manner.
+ sys.stdout.write('\n')
if self.verbose > 1:
s = str(test)
@@ -584,12 +593,15 @@
self.color('normal'), '\n'])
else:
print line
- elif line.startswith(' '):
+ elif line.startswith(' ') or line.strip() == '':
if colorize_diff and len(line) > 4:
color = self.diff_color.get(line[4], color_of_indented_text)
print self.colorize(color, line)
else:
- print self.colorize(color_of_indented_text, line)
+ if line.strip() != '':
+ print self.colorize(color_of_indented_text, line)
+ else:
+ print line
else:
colorize_diff = False
if line.startswith('Failed example'):
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/options.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/options.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/options.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -406,13 +406,6 @@
other = optparse.OptionGroup(parser, "Other", "Other options")
other.add_option(
- '--exit-with-status', action="store_true", dest='exitwithstatus',
- help="""\
-Return an error exit status if the tests failed. This can be useful for
-an invoking process that wants to monitor the result of a test run.
-""")
-
-other.add_option(
'-j', action="store", type="int", dest='processes',
help="""\
Use up to given number of parallel processes to execute tests. May decrease
@@ -444,6 +437,12 @@
compilation to .pyc/.pyo. Use of this option implies --keepbytecode.
""")
+other.add_option(
+ '--exit-with-status', action="store_true", dest='exitwithstatus',
+ help="""DEPRECATED: The test runner will always exit with a status.\
+""")
+
+
parser.add_option_group(other)
######################################################################
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/process.py (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/process.py)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/process.py (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/process.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,50 @@
+##############################################################################
+#
+# Copyright (c) 2004-2008 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Subprocess support.
+
+$Id: __init__.py 86218 2008-05-03 14:17:26Z ctheune $
+"""
+
+import sys
+import time
+import zope.testing.testrunner.feature
+
+
+class SubProcess(zope.testing.testrunner.feature.Feature):
+ """Lists all tests in the report instead of running the tests."""
+
+ def __init__(self, runner):
+ super(SubProcess, self).__init__(runner)
+ self.active = bool(runner.options.resume_layer)
+
+ def global_setup(self):
+ self.original_stderr = sys.stderr
+ sys.stderr = sys.stdout
+ if self.runner.options.processes > 1:
+ # If we only have one subprocess, there's absolutely
+ # no reason to squelch. We will let the messages through in a
+ # timely manner, if they have been requested. On the other hand, if
+ # there are multiple processes, we do squelch to 0.
+ self.runner.options.verbose = 0
+ self.progress = False
+
+ def report(self):
+ sys.stdout.close()
+ # Communicate with the parent. The protocol is obvious:
+ print >> self.original_stderr, self.runner.ran, \
+ len(self.runner.failures), len(self.runner.errors)
+ for test, exc_info in self.runner.failures:
+ print >> self.original_stderr, ' '.join(str(test).strip().split('\n'))
+ for test, exc_info in self.runner.errors:
+ print >> self.original_stderr, ' '.join(str(test).strip().split('\n'))
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/profiling.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/profiling.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/profiling.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -55,36 +55,40 @@
# some Linux distributions don't include the profiler, which hotshot uses
-try:
- import hotshot
- import hotshot.stats
-except ImportError:
- pass
-else:
- class HotshotProfiler(object):
- """hotshot interface"""
+if not sys.hexversion >= 0x02060000:
+ # Hotshot is not maintained any longer in 2.6. It does not support
+ # merging to hotshot files. Thus we won't use it in python2.6 and
+ # onwards
+ try:
+ import hotshot
+ import hotshot.stats
+ except ImportError:
+ pass
+ else:
+ class HotshotProfiler(object):
+ """hotshot interface"""
- def __init__(self, filepath):
- self.profiler = hotshot.Profile(filepath)
- self.enable = self.profiler.start
- self.disable = self.profiler.stop
+ def __init__(self, filepath):
+ self.profiler = hotshot.Profile(filepath)
+ self.enable = self.profiler.start
+ self.disable = self.profiler.stop
+
+ def finish(self):
+ self.profiler.close()
- def finish(self):
- self.profiler.close()
+ def loadStats(self, prof_glob):
+ stats = None
+ for file_name in glob.glob(prof_glob):
+ loaded = hotshot.stats.load(file_name)
+ if stats is None:
+ stats = loaded
+ else:
+ stats.add(loaded)
+ return stats
- def loadStats(self, prof_glob):
- stats = None
- for file_name in glob.glob(prof_glob):
- loaded = hotshot.stats.load(file_name)
- if stats is None:
- stats = loaded
- else:
- stats.add(loaded)
- return stats
+ available_profilers['hotshot'] = HotshotProfiler
- available_profilers['hotshot'] = HotshotProfiler
-
class Profiling(zope.testing.testrunner.feature.Feature):
def __init__(self, runner):
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/runner.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/runner.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/runner.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -16,21 +16,19 @@
$Id: __init__.py 86232 2008-05-03 15:09:33Z ctheune $
"""
+import subprocess
+
import cStringIO
import gc
-import glob
-import os
+import Queue
import re
import sys
-import tempfile
import threading
import time
import traceback
import unittest
-from zope.testing import doctest
-from zope.testing.testrunner.find import find_tests, test_dirs
-from zope.testing.testrunner.find import StartUpFailure, import_name
+from zope.testing.testrunner.find import import_name
from zope.testing.testrunner.find import name_from_layer, _layer_name_cache
from zope.testing.testrunner.refcount import TrackRefs
from zope.testing.testrunner.options import get_options
@@ -43,14 +41,17 @@
import zope.testing.testrunner.garbagecollection
import zope.testing.testrunner.listing
import zope.testing.testrunner.statistics
-import zope.testing.testrunner.subprocess
+import zope.testing.testrunner.process
import zope.testing.testrunner.interfaces
import zope.testing.testrunner.debug
+import zope.testing.testrunner.tb_format
+
PYREFCOUNT_PATTERN = re.compile('\[[0-9]+ refs\]')
is_jython = sys.platform.startswith('java')
+
class SubprocessError(Exception):
"""An error occurred when running a subprocess
"""
@@ -77,11 +78,12 @@
"""
def __init__(self, defaults=None, args=None, found_suites=None,
- options=None):
+ options=None, script_parts=None):
self.defaults = defaults
self.args = args
self.found_suites = found_suites
self.options = options
+ self.script_parts = script_parts
self.failed = True
self.ran = 0
@@ -147,7 +149,6 @@
def configure(self):
if self.args is None:
self.args = sys.argv[:]
-
# Check to see if we are being run as a subprocess. If we are,
# then use the resume-layer and defaults passed in.
if len(self.args) > 1 and self.args[1] == '--resume-layer':
@@ -171,9 +172,6 @@
self.options = options
- # XXX I moved this here mechanically. Move to find feature?
- self.test_directories = test_dirs(self.options, {})
-
self.features.append(zope.testing.testrunner.selftest.SelfTest(self))
self.features.append(zope.testing.testrunner.logsupport.Logging(self))
self.features.append(zope.testing.testrunner.coverage.Coverage(self))
@@ -183,14 +181,18 @@
# Jython GC support is not yet implemented
pass
else:
- self.features.append(zope.testing.testrunner.garbagecollection.Threshold(self))
- self.features.append(zope.testing.testrunner.garbagecollection.Debug(self))
+ self.features.append(
+ zope.testing.testrunner.garbagecollection.Threshold(self))
+ self.features.append(
+ zope.testing.testrunner.garbagecollection.Debug(self))
self.features.append(zope.testing.testrunner.find.Find(self))
- self.features.append(zope.testing.testrunner.subprocess.SubProcess(self))
+ self.features.append(zope.testing.testrunner.process.SubProcess(self))
self.features.append(zope.testing.testrunner.filter.Filter(self))
self.features.append(zope.testing.testrunner.listing.Listing(self))
- self.features.append(zope.testing.testrunner.statistics.Statistics(self))
+ self.features.append(
+ zope.testing.testrunner.statistics.Statistics(self))
+ self.features.append(zope.testing.testrunner.tb_format.Traceback(self))
# Remove all features that aren't activated
self.features = [f for f in self.features if f.active]
@@ -228,11 +230,12 @@
if should_resume:
setup_layers = None
if layers_to_run:
- self.ran += resume_tests(self.options, self.features,
+ self.ran += resume_tests(
+ self.script_parts, self.options, self.features,
layers_to_run, self.failures, self.errors)
if setup_layers:
- if self.options.resume_layer == None:
+ if self.options.resume_layer is None:
self.options.output.info("Tearing down left over layers:")
tear_down_unneeded(self.options, (), setup_layers, True)
@@ -308,7 +311,8 @@
output.stop_tests()
failures.extend(result.failures)
errors.extend(result.errors)
- output.summary(result.testsRun, len(result.failures), len(result.errors), t)
+ output.summary(result.testsRun, len(result.failures),
+ len(result.errors), t)
ran = result.testsRun
if is_jython:
@@ -353,8 +357,8 @@
output.info("Running %s tests:" % layer_name)
tear_down_unneeded(options, needed, setup_layers)
- if options.resume_layer != None:
- output.info_suboptimal( " Running in a subprocess.")
+ if options.resume_layer is not None:
+ output.info_suboptimal(" Running in a subprocess.")
try:
setup_layer(options, layer, setup_layers)
@@ -369,18 +373,23 @@
else:
return run_tests(options, tests, layer_name, failures, errors)
+
class SetUpLayerFailure(unittest.TestCase):
def runTest(self):
"Layer set up failure."
-def spawn_layer_in_subprocess(result, options, features, layer_name, layer,
- failures, errors, resume_number):
+
+def spawn_layer_in_subprocess(result, script_parts, options, features,
+ layer_name, layer, failures, errors,
+ resume_number):
try:
- args = [sys.executable,
- sys.argv[0],
- '--resume-layer', layer_name, str(resume_number),
- ]
+ # BBB
+ if script_parts is None:
+ script_parts = sys.argv[0:1]
+ args = [sys.executable]
+ args.extend(script_parts)
+ args.extend(['--resume-layer', layer_name, str(resume_number)])
for d in options.testrunner_defaults:
args.extend(['--default', d])
@@ -388,87 +397,150 @@
# this is because of a bug in Python (http://www.python.org/sf/900092)
if (options.profile == 'hotshot'
- and sys.version_info[:3] <= (2,4,1)):
+ and sys.version_info[:3] <= (2, 4, 1)):
args.insert(1, '-O')
if sys.platform.startswith('win'):
args = args[0] + ' ' + ' '.join([
('"' + a.replace('\\', '\\\\').replace('"', '\\"') + '"')
- for a in args[1:]
- ])
+ for a in args[1:]])
for feature in features:
feature.layer_setup(layer)
- subin, subout, suberr = os.popen3(args)
+ child = subprocess.Popen(args, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ close_fds=not sys.platform.startswith('win'))
+
while True:
try:
- for line in subout:
- result.stdout.append(line)
+ while True:
+ # We use readline() instead of iterating over stdout
+ # because it appears that iterating over stdout causes a
+ # lot more buffering to take place (probably so it can
+ # return its lines as a batch). We don't want too much
+ # buffering because this foils automatic and human monitors
+ # trying to verify that the subprocess is still alive.
+ l = child.stdout.readline()
+ if not l:
+ break
+ result.write(l)
except IOError, e:
if e.errno == errno.EINTR:
- # If the reading the subprocess input is interruped (as
- # be caused by recieving SIGCHLD), then retry.
+ # If the subprocess dies before we finish reading its
+ # output, a SIGCHLD signal can interrupt the reading.
+ # The correct thing to do in that case is to retry.
continue
- options.output.error(
+ output.error(
"Error reading subprocess output for %s" % layer_name)
- options.output.info(str(e))
+ output.info(str(e))
else:
break
- # The subprocess may have spewed any number of things to stderr, so
- # we'll keep looking until we find the information we're looking for.
- whole_suberr = ''
- while True:
- line = suberr.readline()
- whole_suberr += line
- if not line:
- raise SubprocessError(
- 'No subprocess summary found', repr(whole_suberr))
-
+ # Now stderr should be ready to read the whole thing.
+ erriter = iter(child.stderr.read().splitlines())
+ nfail = nerr = 0
+ for line in erriter:
try:
result.num_ran, nfail, nerr = map(int, line.strip().split())
+ except ValueError:
+ continue
+ else:
break
- except KeyboardInterrupt:
- raise
- except:
- continue
+ else:
+ output = options.output
+ output.error_with_banner("Could not communicate with subprocess:\n"
+ "\n" + suberr)
while nfail > 0:
nfail -= 1
- failures.append((suberr.readline().strip(), None))
+ failures.append((erriter.next().strip(), None))
while nerr > 0:
nerr -= 1
- errors.append((suberr.readline().strip(), None))
+ errors.append((erriter.next().strip(), None))
finally:
result.done = True
-class SubprocessResult(object):
- def __init__(self):
- self.num_ran = 0
+class AbstractSubprocessResult(object):
+ """A result of a subprocess layer run."""
+
+ num_ran = 0
+ done = False
+
+ def __init__(self, layer_name, queue):
+ self.layer_name = layer_name
+ self.queue = queue
self.stdout = []
- self.done = False
+ def write(self, out):
+ """Receive a line of the subprocess out."""
-def resume_tests(options, features, layers, failures, errors):
+
+class DeferredSubprocessResult(AbstractSubprocessResult):
+ """Keeps stdout around for later processing."""
+
+ def write(self, out):
+ if not _is_dots(out):
+ self.stdout.append(out)
+
+
+class ImmediateSubprocessResult(AbstractSubprocessResult):
+ """Sends complete output to queue."""
+
+ def write(self, out):
+ sys.stdout.write(out)
+ # Help keep-alive monitors (human or automated) keep up-to-date.
+ sys.stdout.flush()
+
+
+_is_dots = re.compile(r'\.+\n').match
+class KeepaliveSubprocessResult(AbstractSubprocessResult):
+ "Keeps stdout for later processing; sends marks to queue to show activity."
+
+ _done = False
+
+ def _set_done(self, value):
+ self._done = value
+ assert value, 'Internal error: unexpectedly setting done to False'
+ self.queue.put((self.layer_name, ' LAYER FINISHED'))
+ done = property(lambda self: self._done, _set_done)
+
+ def write(self, out):
+ if _is_dots(out):
+ self.queue.put((self.layer_name, out.strip()))
+ else:
+ self.stdout.append(out)
+
+
+def resume_tests(script_parts, options, features, layers, failures, errors):
results = []
+ stdout_queue = None
+ if options.processes == 1:
+ result_factory = ImmediateSubprocessResult
+ elif options.verbose > 1:
+ result_factory = KeepaliveSubprocessResult
+ stdout_queue = Queue.Queue()
+ else:
+ result_factory = DeferredSubprocessResult
resume_number = int(options.processes > 1)
ready_threads = []
for layer_name, layer, tests in layers:
- result = SubprocessResult()
+ result = result_factory(layer_name, stdout_queue)
results.append(result)
ready_threads.append(threading.Thread(
target=spawn_layer_in_subprocess,
- args=(result, options, features, layer_name, layer, failures,
- errors, resume_number)))
+ args=(result, script_parts, options, features, layer_name, layer,
+ failures, errors, resume_number)))
resume_number += 1
# Now start a few threads at a time.
running_threads = []
results_iter = iter(results)
current_result = results_iter.next()
+ last_layer_intermediate_output = None
+ output = None
while ready_threads or running_threads:
while len(running_threads) < options.processes and ready_threads:
thread = ready_threads.pop(0)
@@ -479,9 +551,27 @@
if not thread.isAlive():
del running_threads[index]
- # We want to display results in the order they would have been
- # displayed, had the work not been done in parallel.
+ # Clear out any messages in queue
+ while stdout_queue is not None:
+ previous_output = output
+ try:
+ layer_name, output = stdout_queue.get(False)
+ except Queue.Empty:
+ break
+ if layer_name != last_layer_intermediate_output:
+ # Clarify what layer is reporting activity.
+ if previous_output is not None:
+ sys.stdout.write(']\n')
+ sys.stdout.write(
+ '[Parallel tests running in %s:\n ' % (layer_name,))
+ last_layer_intermediate_output = layer_name
+ sys.stdout.write(output)
+ # Display results in the order they would have been displayed, had the
+ # work not been done in parallel.
while current_result and current_result.done:
+ if output is not None:
+ sys.stdout.write(']\n')
+ output = None
map(sys.stdout.write, current_result.stdout)
try:
@@ -489,6 +579,8 @@
except StopIteration:
current_result = None
+ # Help keep-alive monitors (human or automated) keep up-to-date.
+ sys.stdout.flush()
time.sleep(0.01) # Keep the loop from being too tight.
# Return the total number of tests run.
@@ -496,8 +588,8 @@
def tear_down_unneeded(options, needed, setup_layers, optional=False):
- # Tear down any layers not needed for these tests. The unneeded
- # layers might interfere.
+ # Tear down any layers not needed for these tests. The unneeded layers
+ # might interfere.
unneeded = [l for l in setup_layers if l not in needed]
unneeded = order_by_bases(unneeded)
unneeded.reverse()
@@ -522,6 +614,7 @@
Try running layer %r by itself.
"""
+
def setup_layer(options, layer, setup_layers):
assert layer is not object
output = options.output
@@ -656,7 +749,7 @@
to allow locating layers in cases where it would otherwise be
impossible.
"""
- if _layer_name_cache.has_key(layer_name):
+ if layer_name in _layer_name_cache:
return _layer_name_cache[layer_name]
layer_names = layer_name.split('.')
layer_module, module_layer_name = layer_names[:-1], layer_names[-1]
Deleted: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/subprocess.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/subprocess.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/subprocess.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -1,44 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004-2008 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Subprocess support.
-
-$Id: __init__.py 86218 2008-05-03 14:17:26Z ctheune $
-"""
-
-import sys
-import time
-import zope.testing.testrunner.feature
-
-
-class SubProcess(zope.testing.testrunner.feature.Feature):
- """Lists all tests in the report instead of running the tests."""
-
- def __init__(self, runner):
- super(SubProcess, self).__init__(runner)
- self.active = bool(runner.options.resume_layer)
-
- def global_setup(self):
- self.original_stderr = sys.stderr
- sys.stderr = sys.stdout
- self.runner.options.verbose = False
-
- def report(self):
- sys.stdout.close()
- # Communicate with the parent. The protocol is obvious:
- print >> self.original_stderr, self.runner.ran, \
- len(self.runner.failures), len(self.runner.errors)
- for test, exc_info in self.runner.failures:
- print >> self.original_stderr, ' '.join(str(test).strip().split('\n'))
- for test, exc_info in self.runner.errors:
- print >> self.original_stderr, ' '.join(str(test).strip().split('\n'))
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tb_format.py (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/tb_format.py)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tb_format.py (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tb_format.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,52 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Set up testing environment
+
+$Id: __init__.py 68482 2006-06-04 14:58:55Z jim $
+"""
+
+import StringIO
+import os
+import sys
+import traceback
+import zope.exceptions.exceptionformatter
+import zope.testing.testrunner.feature
+
+
+def format_exception(t, v, tb, limit=None):
+ fmt = zope.exceptions.exceptionformatter.TextExceptionFormatter(
+ limit=None, with_filenames=True)
+ return fmt.formatException(t, v, tb)
+
+
+def print_exception(t, v, tb, limit=None, file=None):
+ if file is None:
+ file = sys.stdout
+ file.writelines(format_exception(t, v, tb, limit))
+
+
+class Traceback(zope.testing.testrunner.feature.Feature):
+
+ active = True
+
+ def global_setup(self):
+ self.old_format = traceback.format_exception
+ traceback.format_exception = format_exception
+
+ self.old_print = traceback.print_exception
+ traceback.print_exception = print_exception
+
+ def global_teardown(self):
+ traceback.format_exception = self.old_format
+ traceback.print_exception = self.old_print
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-arguments.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-arguments.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-arguments.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -12,7 +12,7 @@
... '--tests-pattern', '^sampletestsf?$',
... ]
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults, 'test --layer 111'.split())
+ >>> testrunner.run_internal(defaults, 'test --layer 111'.split())
Running samplelayers.Layer111 tests:
Set up samplelayers.Layerx in N.NNN seconds.
Set up samplelayers.Layer1 in N.NNN seconds.
@@ -38,7 +38,7 @@
... '--tests-pattern', '^sampletestsf?$',
... ]
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults, 'test --layer 111'.split())
+ >>> testrunner.run_internal(defaults, 'test --layer 111'.split())
Listing samplelayers.Layer111 tests:
test_x1 (sample1.sampletests.test111.TestA)
test_y0 (sample1.sampletests.test111.TestA)
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-colors.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-colors.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-colors.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -52,7 +52,7 @@
A successful test run soothes the developer with warm green colors:
>>> sys.argv = 'test --layer 122 -c'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
{normal}Running samplelayers.Layer122 tests:{normal}
Set up samplelayers.Layer1 in {green}0.000{normal} seconds.
Set up samplelayers.Layer12 in {green}0.000{normal} seconds.
@@ -71,7 +71,7 @@
A failed test run highlights the failures in red:
>>> sys.argv = 'test -c --tests-pattern ^sampletests(f|_e|_f)?$ '.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
{normal}Running samplelayers.Layer1 tests:{normal}
Set up samplelayers.Layer1 in {green}0.000{normal} seconds.
{normal} Ran {green}9{normal} tests with {green}0{normal} failures and {green}0{normal} errors in {green}0.001{normal} seconds.{normal}
@@ -118,12 +118,13 @@
{red} Traceback (most recent call last):{normal}
{red} File ".../doctest.py", line 1356, in __run{normal}
{red} compileflags, 1) in test.globs{normal}
- {red} File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?{normal}
+ {red} File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?{normal}
{red} f(){normal}
{red} File "testrunner-ex/sample2/sampletests_e.py", line 19, in f{normal}
{red} g(){normal}
{red} File "testrunner-ex/sample2/sampletests_e.py", line 24, in g{normal}
{red} x = y + 1{normal}
+ {red} - __traceback_info__: I don't know what Y should be.{normal}
{red} NameError: global name 'y' is not defined{normal}
<BLANKLINE>
<BLANKLINE>
@@ -138,6 +139,7 @@
{cyan} g(){normal}
{normal} File "{boldblue}testrunner-ex/sample2/sampletests_e.py{normal}", line {boldred}24{normal}, in {boldcyan}g{normal}
{cyan} x = y + 1{normal}
+ {red} - __traceback_info__: I don't know what Y should be.{normal}
{red}NameError: global name 'y' is not defined{normal}
<BLANKLINE>
<BLANKLINE>
@@ -154,9 +156,9 @@
{red} Traceback (most recent call last):{normal}
{red} File ".../doctest.py", line 1356, in __run{normal}
{red} compileflags, 1) in test.globs{normal}
- {red} File "<doctest e.txt[1]>", line 1, in ?{normal}
+ {red} File "<doctest e.txt[line 4, example 1]>", line 1, in ?{normal}
{red} f(){normal}
- {red} File "<doctest e.txt[0]>", line 2, in f{normal}
+ {red} File "<doctest e.txt[line 1, example 0]>", line 2, in f{normal}
{red} return x{normal}
{red} NameError: global name 'x' is not defined{normal}
<BLANKLINE>
@@ -186,7 +188,7 @@
colors:
>>> sys.argv = 'test --tests-pattern ^pledge$ -c'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
{normal}Running zope.testing.testrunner.layer.UnitTests tests:{normal}
Set up zope.testing.testrunner.layer.UnitTests in {green}N.NNN{normal} seconds.
<BLANKLINE>
@@ -218,7 +220,7 @@
apart:
>>> sys.argv = 'test --tests-pattern ^pledge$ --ndiff -c'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
{normal}Running zope.testing.testrunner.layer.UnitTests tests:{normal}
Set up zope.testing.testrunner.layer.UnitTests in {green}N.NNN{normal} seconds.
<BLANKLINE>
@@ -243,14 +245,44 @@
{normal}Tearing down left over layers:{normal}
Tear down zope.testing.testrunner.layer.UnitTests in {green}N.NNN{normal} seconds.
+Even test failures that have actual blank lines (as opposed to <BLANKLINE>) in
+them are highlighted correctly.
+ >>> import zope.testing.testrunner.formatter
+ >>> formatter = zope.testing.testrunner.formatter.ColorfulOutputFormatter(None)
+ >>> formatter.print_doctest_failure("""\
+ ... File "sometest.txt", line 221, in sometest.txt
+ ... Failed example:
+ ... foo()
+ ... Expected:
+ ... Output that contains
+ ...
+ ... blank lines.
+ ... Got:
+ ... Output that still contains
+ ...
+ ... blank lines.""")
+ {normal} File "sometest.txt", line 221, in sometest.txt{normal}
+ Failed example:
+ {cyan} foo(){normal}
+ Expected:
+ {green} Output that contains{normal}
+ <BLANKLINE>
+ {green} blank lines.{normal}
+ Got:
+ {red} Output that still contains{normal}
+ <BLANKLINE>
+ {red} blank lines.{normal}
+ <BLANKLINE>
+
+
Timing individual tests
-----------------------
At very high verbosity levels you can see the time taken by each test
>>> sys.argv = 'test -u -t test_one.TestNotMuch -c -vvv'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
{normal}Running tests at level 1{normal}
{normal}Running zope.testing.testrunner.layer.UnitTests tests:{normal}
Set up zope.testing.testrunner.layer.UnitTests in {green}N.NNN{normal} seconds.
@@ -272,7 +304,7 @@
in the test runner to 0 seconds to make all of the tests seem slow.
>>> sys.argv = 'test -u -t test_one.TestNotMuch -c -vvv --slow-test 0'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
{normal}Running tests at level 1{normal}
{normal}Running zope.testing.testrunner.layer.UnitTests tests:{normal}
Set up zope.testing.testrunner.layer.UnitTests in {green}N.NNN{normal} seconds.
@@ -298,7 +330,7 @@
--no-color options will disable colorized output:
>>> sys.argv = 'test --layer 122 -c -C'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
@@ -311,7 +343,7 @@
False
>>> sys.argv = 'test --layer 122 -c --no-color'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
@@ -332,7 +364,7 @@
pretends it is a terminal, but the curses module will realize it isn't:
>>> sys.argv = 'test --layer 122 --auto-color'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
@@ -356,7 +388,7 @@
>>> sys.modules['curses'] = FakeCurses()
>>> sys.argv = 'test --layer 122 --auto-color'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
{normal}Running samplelayers.Layer122 tests:{normal}
Set up samplelayers.Layer1 in {green}0.000{normal} seconds.
Set up samplelayers.Layer12 in {green}0.000{normal} seconds.
@@ -375,7 +407,7 @@
>>> sys.stdout = real_stdout
>>> sys.argv = 'test --layer 122 --auto-color'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-coverage.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-coverage.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-coverage.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -15,7 +15,7 @@
>>> sys.argv = 'test --coverage=coverage_dir'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Ran 9 tests with 0 failures and 0 errors in 0.000 seconds.
@@ -52,31 +52,7 @@
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in 0.000 seconds.
lines cov% module (path)
- 1 100% sample1.__init__ (testrunner-ex/sample1/__init__.py)
- 1 100% sample1.sample11.__init__ (testrunner-ex/sample1/sample11/__init__.py)
- 74 86% sample1.sample11.sampletests (testrunner-ex/sample1/sample11/sampletests.py)
- 1 100% sample1.sample13.__init__ (testrunner-ex/sample1/sample13/__init__.py)
- 48 100% sample1.sample13.sampletests (testrunner-ex/sample1/sample13/sampletests.py)
- 1 100% sample1.sampletests.__init__ (testrunner-ex/sample1/sampletests/__init__.py)
- 48 100% sample1.sampletests.test1 (testrunner-ex/sample1/sampletests/test1.py)
- 74 100% sample1.sampletests.test11 (testrunner-ex/sample1/sampletests/test11.py)
- 74 100% sample1.sampletests.test111 (testrunner-ex/sample1/sampletests/test111.py)
- 74 100% sample1.sampletests.test112 (testrunner-ex/sample1/sampletests/test112.py)
- 74 100% sample1.sampletests.test12 (testrunner-ex/sample1/sampletests/test12.py)
- 74 100% sample1.sampletests.test121 (testrunner-ex/sample1/sampletests/test121.py)
- 74 100% sample1.sampletests.test122 (testrunner-ex/sample1/sampletests/test122.py)
- 48 100% sample1.sampletests.test_one (testrunner-ex/sample1/sampletests/test_one.py)
- 48 100% sample1.sampletestsf (testrunner-ex/sample1/sampletestsf.py)
- 1 100% sample2.__init__ (testrunner-ex/sample2/__init__.py)
- 1 100% sample2.sample21.__init__ (testrunner-ex/sample2/sample21/__init__.py)
- 48 100% sample2.sample21.sampletests (testrunner-ex/sample2/sample21/sampletests.py)
- 1 100% sample2.sampletests.__init__ (testrunner-ex/sample2/sampletests/__init__.py)
- 48 100% sample2.sampletests.test_1 (testrunner-ex/sample2/sampletests/test_1.py)
- 48 100% sample2.sampletests.testone (testrunner-ex/sample2/sampletests/testone.py)
- 1 100% sample3.__init__ (testrunner-ex/sample3/__init__.py)
- 48 100% sample3.sampletests (testrunner-ex/sample3/sampletests.py)
- 84 85% samplelayers (testrunner-ex/samplelayers.py)
- 1 100% sampletests.__init__ (testrunner-ex/sampletests/__init__.py)
+ ...
48 100% sampletests.test1 (testrunner-ex/sampletests/test1.py)
74 100% sampletests.test11 (testrunner-ex/sampletests/test11.py)
74 100% sampletests.test111 (testrunner-ex/sampletests/test111.py)
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging-layer-setup.test
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging-layer-setup.test 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging-layer-setup.test 2009-09-13 08:21:57 UTC (rev 103878)
@@ -43,7 +43,7 @@
>>> sys.argv = [testrunner_script]
>>> import zope.testing.testrunner
>>> try:
- ... zope.testing.testrunner.run(['--path', dir, '-D'])
+ ... zope.testing.testrunner.run_internal(['--path', dir, '-D'])
... finally: sys.stdin = real_stdin
... # doctest: +ELLIPSIS
Running tests.Layer tests:
@@ -98,7 +98,7 @@
>>> import sys
>>> try:
- ... zope.testing.testrunner.run(
+ ... zope.testing.testrunner.run_internal(
... ['--path', dir, '-Dvv', '--tests-pattern', 'tests2'])
... finally: sys.stdin = real_stdin
... # doctest: +ELLIPSIS +REPORT_NDIFF
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-debugging.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -32,7 +32,7 @@
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace1').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +ELLIPSIS
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -58,7 +58,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem1 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -87,7 +87,7 @@
>>> sys.stdin = Input('up\np x\np y\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem_failure1 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
Running zope.testing.testrunner.layer.UnitTests tests:
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-discovery.txt (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/testrunner-discovery.txt)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-discovery.txt (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-discovery.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,48 @@
+Automatically discovering tests
+===============================
+
+You can explicitly specify which tests to run by providing a function that
+returns a unittest.TestSuite in the test modules (the name of the function can
+be configured with the --suite-name parameter, it defaults to 'test_suite'). If
+no such function is present, testrunner will use all classes in the module that
+inherit from unittest.TestCase as tests:
+
+ >>> import os, sys
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+
+ >>> from zope.testing import testrunner
+
+ >>> defaults = [
+ ... '--path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
+ >>> sys.argv = ['test',
+ ... '--tests-pattern', '^sampletests_discover$',
+ ... ]
+ >>> testrunner.run(defaults)
+ Running zope.testing.testrunner.layer.UnitTests tests:
+ Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+ Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+ Tearing down left over layers:
+ Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+ False
+
+If the module neither provides a TestSuite nor has discoverable tests,
+testrunner will exit with an error to prevent accidentally missing test cases:
+
+ >>> sys.argv = ['test',
+ ... '--tests-pattern', '^sampletests_discover_notests$',
+ ... ]
+ >>> testrunner.run(defaults)
+ Test-module import failures:
+ <BLANKLINE>
+ Module: sample1.sampletests_discover_notests
+ <BLANKLINE>
+ TypeError: Module sample1.sampletests_discover_notests does not define any tests
+ <BLANKLINE>
+ <BLANKLINE>
+ <BLANKLINE>
+ Test-modules with import problems:
+ sample1.sampletests_discover_notests
+ Total: 0 tests, 0 failures, 0 errors in 0.000 seconds.
+ True
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-edge-cases.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-edge-cases.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-edge-cases.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -22,18 +22,19 @@
... '--tests-pattern', '^sampletestsf?$',
... ]
>>> sys.argv = ['test']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +ELLIPSIS
Test-module import failures:
<BLANKLINE>
Module: sampletestsf
<BLANKLINE>
+ Traceback (most recent call last):
ImportError: No module named sampletestsf
...
>>> sys.path.append(directory_with_tests)
>>> sys.argv = ['test']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +ELLIPSIS
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -50,9 +51,28 @@
Total: 405 tests, 0 failures, 0 errors in N.NNN seconds.
False
+Bug #251759: The test runner's protection against descending into non-package
+directories was ineffective, e.g. picking up tests from eggs that were stored
+close by:
+
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex-251759')
+
+ >>> defaults = [
+ ... '--test-path', directory_with_tests,
+ ... ]
+ >>> testrunner.run_internal(defaults)
+ Total: 0 tests, 0 failures, 0 errors in 0.000 seconds.
+ False
+
+
Debugging Edge Cases
--------------------
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> defaults = [
+ ... '--test-path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
>>> class Input:
... def __init__(self, src):
... self.lines = src.split('\n')
@@ -71,7 +91,7 @@
... sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace2').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +ELLIPSIS
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -89,7 +109,7 @@
>>> sys.stdin = Input('n\np x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace4').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
Running zope.testing.testrunner.layer.UnitTests tests:...
--Return--
@@ -110,14 +130,14 @@
>>> sys.stdin = Input('n\np x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace3').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
Running zope.testing.testrunner.layer.UnitTests tests:...
--Return--
> doctest.py(351)set_trace()->None
-> pdb.Pdb.set_trace(self)
(Pdb) n
- > <doctest sample3.sampletests_d.set_trace3[1]>(3)...()
+ > <doctest sample3.sampletests_d.set_trace3[line 3, example 1]>(3)...()
-> y = x
(Pdb) p x
1
@@ -132,14 +152,14 @@
>>> sys.stdin = Input('n\np x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace5').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
Running zope.testing.testrunner.layer.UnitTests tests:...
--Return--
> doctest.py(351)set_trace()->None
-> pdb.Pdb.set_trace(self)
(Pdb) n
- > <doctest set_trace5.txt[1]>(3)...()
+ > <doctest set_trace5.txt[line 2, example 1]>(3)...()
-> y = x
(Pdb) p x
1
@@ -155,7 +175,7 @@
>>> sys.stdin = Input('n\np x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t set_trace6').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
Running zope.testing.testrunner.layer.UnitTests tests:...
--Return--
@@ -176,7 +196,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem2 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -206,7 +226,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem3 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -227,7 +247,7 @@
<BLANKLINE>
exceptions.ValueError:
<BLANKLINE>
- > <doctest sample3.sampletests_d.post_mortem3[1]>(1)...()
+ > <doctest sample3.sampletests_d.post_mortem3[line 3, example 1]>(1)...()
(Pdb) p x
1
(Pdb) c
@@ -238,7 +258,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem4 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -270,7 +290,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem5 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -292,7 +312,7 @@
<BLANKLINE>
exceptions.ValueError:
<BLANKLINE>
- > <doctest post_mortem5.txt[1]>(1)...()
+ > <doctest post_mortem5.txt[line 2, example 1]>(1)...()
(Pdb) p x
1
(Pdb) c
@@ -304,7 +324,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem6 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -338,7 +358,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem_failure2 -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -373,7 +393,7 @@
>>> sys.stdin = Input('p x\nc')
>>> sys.argv = ('test -ssample3 --tests-pattern ^sampletests_d$'
... ' -t post_mortem_failure.txt -D').split()
- >>> try: testrunner.run(defaults)
+ >>> try: testrunner.run_internal(defaults)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:...
@@ -405,7 +425,7 @@
Post-mortem debugging with triple verbosity
>>> sys.argv = 'test --layer samplelayers.Layer1$ -vvv -D'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -430,11 +450,12 @@
>>> sys.argv = ['test',
... '--tests-pattern', '^sampletests_none_suite$',
... ]
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Test-module import failures:
<BLANKLINE>
Module: sample1.sampletests_none_suite
<BLANKLINE>
+ Traceback (most recent call last):
TypeError: Invalid test_suite, None, in sample1.sampletests_none_suite
<BLANKLINE>
<BLANKLINE>
@@ -448,11 +469,12 @@
>>> sys.argv = ['test',
... '--tests-pattern', '^sampletests_none_test$',
... ]
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Test-module import failures:
<BLANKLINE>
Module: sample1.sampletests_none_test
<BLANKLINE>
+ Traceback (most recent call last):
TypeError: ...
<BLANKLINE>
<BLANKLINE>
@@ -469,7 +491,7 @@
repeat count greater than 1
>>> sys.argv = 'test -r'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
You must use the --repeat (-N) option to specify a repeat
count greater than 1 when using the --report_refcounts (-r)
option.
@@ -477,9 +499,10 @@
True
>>> sys.argv = 'test -r -N1'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
You must use the --repeat (-N) option to specify a repeat
count greater than 1 when using the --report_refcounts (-r)
option.
<BLANKLINE>
True
+
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-errors.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-errors.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-errors.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -16,7 +16,7 @@
... for f in files:
... shutil.copy(os.path.join(root, f),
... os.path.join(directory_with_tests, root[n:], f))
-
+
>>> from zope.testing import testrunner
>>> defaults = [
... '--path', directory_with_tests,
@@ -24,7 +24,7 @@
... ]
>>> sys.argv = 'test --tests-pattern ^sampletests(f|_e|_f)?$ '.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Running samplelayers.Layer1 tests:
...
@@ -44,12 +44,13 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
f()
File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
<BLANKLINE>
@@ -62,6 +63,7 @@
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
<BLANKLINE>
@@ -78,9 +80,9 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest e.txt[1]>", line 1, in ?
+ File "<doctest e.txt[line 4, example 1]>", line 1, in ?
f()
- File "<doctest e.txt[0]>", line 2, in f
+ File "<doctest e.txt[line 1, example 0]>", line 2, in f
return x
NameError: global name 'x' is not defined
<BLANKLINE>
@@ -108,7 +110,7 @@
there'll be a summary of the errors at the end of the test:
>>> sys.argv = 'test --tests-pattern ^sampletests(f|_e|_f)?$ -uv'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -129,12 +131,13 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
f()
File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
...
@@ -148,6 +151,7 @@
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
...
@@ -164,9 +168,9 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest e.txt[1]>", line 1, in ?
+ File "<doctest e.txt[line 4, example 1]>", line 1, in ?
f()
- File "<doctest e.txt[0]>", line 2, in f
+ File "<doctest e.txt[line 1, example 0]>", line 2, in f
return x
NameError: global name 'x' is not defined
<BLANKLINE>
@@ -198,7 +202,7 @@
>>> sys.argv = ('test --tests-pattern ^sampletests(f|_e|_f)?$ -u -ssample2'
... ' -p').split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -218,18 +222,19 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
f()
File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
- 2/56 (3.6%)\r
- \r
- 3/56 (5.4%)\r
- \r
+ 2/56 (3.6%)##r##
+ ##r##
+ 3/56 (5.4%)##r##
+ ##r##
4/56 (7.1%)
<BLANKLINE>
Error in test test3 (sample2.sampletests_e.Test)
@@ -240,12 +245,13 @@
g()
File "testrunner-ex/sample2/sampletests_e.py", line 24, in g
x = y + 1
+ - __traceback_info__: I don't know what Y should be.
NameError: global name 'y' is not defined
<BLANKLINE>
- 5/56 (8.9%)\r
- \r
- 6/56 (10.7%)\r
- \r
+ 5/56 (8.9%)##r##
+ ##r##
+ 6/56 (10.7%)##r##
+ ##r##
7/56 (12.5%)
<BLANKLINE>
Failure in test testrunner-ex/sample2/e.txt
@@ -260,9 +266,9 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest e.txt[1]>", line 1, in ?
+ File "<doctest e.txt[line 4, example 1]>", line 1, in ?
f()
- File "<doctest e.txt[0]>", line 2, in f
+ File "<doctest e.txt[line 1, example 0]>", line 2, in f
return x
NameError: global name 'x' is not defined
<BLANKLINE>
@@ -276,85 +282,84 @@
raise self.failureException, \
AssertionError: 1 != 0
<BLANKLINE>
- 9/56 (16.1%)\r
- \r
- 10/56 (17.9%)\r
- \r
- 11/56 (19.6%)\r
- \r
- 12/56 (21.4%)\r
- \r
- 13/56 (23.2%)\r
- \r
- 14/56 (25.0%)\r
- \r
- 15/56 (26.8%)\r
- \r
- 16/56 (28.6%)\r
- \r
- 17/56 (30.4%)\r
- \r
- 18/56 (32.1%)\r
- \r
- 19/56 (33.9%)\r
- \r
- 20/56 (35.7%)\r
- \r
- 24/56 (42.9%)\r
- \r
- 25/56 (44.6%)\r
- \r
- 26/56 (46.4%)\r
- \r
- 27/56 (48.2%)\r
- \r
- 28/56 (50.0%)\r
- \r
- 29/56 (51.8%)\r
- \r
- 30/56 (53.6%)\r
- \r
- 31/56 (55.4%)\r
- \r
- 32/56 (57.1%)\r
- \r
- 33/56 (58.9%)\r
- \r
- 34/56 (60.7%)\r
- \r
- 35/56 (62.5%)\r
- \r
- 36/56 (64.3%)\r
- \r
- 40/56 (71.4%)\r
- \r
- 41/56 (73.2%)\r
- \r
- 42/56 (75.0%)\r
- \r
- 43/56 (76.8%)\r
- \r
- 44/56 (78.6%)\r
- \r
- 45/56 (80.4%)\r
- \r
- 46/56 (82.1%)\r
- \r
- 47/56 (83.9%)\r
- \r
- 48/56 (85.7%)\r
- \r
- 49/56 (87.5%)\r
- \r
- 50/56 (89.3%)\r
- \r
- 51/56 (91.1%)\r
- \r
- 52/56 (92.9%)\r
- \r
- 56/56 (100.0%)\r
- \r
- <BLANKLINE>
+ 9/56 (16.1%)##r##
+ ##r##
+ 10/56 (17.9%)##r##
+ ##r##
+ 11/56 (19.6%)##r##
+ ##r##
+ 12/56 (21.4%)##r##
+ ##r##
+ 13/56 (23.2%)##r##
+ ##r##
+ 14/56 (25.0%)##r##
+ ##r##
+ 15/56 (26.8%)##r##
+ ##r##
+ 16/56 (28.6%)##r##
+ ##r##
+ 17/56 (30.4%)##r##
+ ##r##
+ 18/56 (32.1%)##r##
+ ##r##
+ 19/56 (33.9%)##r##
+ ##r##
+ 20/56 (35.7%)##r##
+ ##r##
+ 24/56 (42.9%)##r##
+ ##r##
+ 25/56 (44.6%)##r##
+ ##r##
+ 26/56 (46.4%)##r##
+ ##r##
+ 27/56 (48.2%)##r##
+ ##r##
+ 28/56 (50.0%)##r##
+ ##r##
+ 29/56 (51.8%)##r##
+ ##r##
+ 30/56 (53.6%)##r##
+ ##r##
+ 31/56 (55.4%)##r##
+ ##r##
+ 32/56 (57.1%)##r##
+ ##r##
+ 33/56 (58.9%)##r##
+ ##r##
+ 34/56 (60.7%)##r##
+ ##r##
+ 35/56 (62.5%)##r##
+ ##r##
+ 36/56 (64.3%)##r##
+ ##r##
+ 40/56 (71.4%)##r##
+ ##r##
+ 41/56 (73.2%)##r##
+ ##r##
+ 42/56 (75.0%)##r##
+ ##r##
+ 43/56 (76.8%)##r##
+ ##r##
+ 44/56 (78.6%)##r##
+ ##r##
+ 45/56 (80.4%)##r##
+ ##r##
+ 46/56 (82.1%)##r##
+ ##r##
+ 47/56 (83.9%)##r##
+ ##r##
+ 48/56 (85.7%)##r##
+ ##r##
+ 49/56 (87.5%)##r##
+ ##r##
+ 50/56 (89.3%)##r##
+ ##r##
+ 51/56 (91.1%)##r##
+ ##r##
+ 52/56 (92.9%)##r##
+ ##r##
+ 56/56 (100.0%)##r##
+ ##r##
Ran 56 tests with 3 failures and 1 errors in 0.054 seconds.
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -371,7 +376,7 @@
examples in the same test to fail. Each failure is reported:
>>> sys.argv = 'test --tests-pattern ^sampletests_1$'.split()
- >>> testrunner.run(defaults) # doctest: +NORMALIZE_WHITESPACE
+ >>> testrunner.run_internal(defaults) # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -389,7 +394,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
x = y
NameError: name 'y' is not defined
----------------------------------------------------------------------
@@ -401,7 +406,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[1]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 4, example 1]>", line 1, in ?
x
NameError: name 'x' is not defined
----------------------------------------------------------------------
@@ -413,7 +418,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[2]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 7, example 2]>", line 1, in ?
z = x + 1
NameError: name 'x' is not defined
<BLANKLINE>
@@ -428,7 +433,7 @@
me the first failed example in a doctest" :)
>>> sys.argv = 'test --tests-pattern ^sampletests_1$ -1'.split()
- >>> testrunner.run(defaults) # doctest:
+ >>> testrunner.run_internal(defaults) # doctest:
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -446,7 +451,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
x = y
NameError: name 'y' is not defined
<BLANKLINE>
@@ -461,7 +466,7 @@
... 'test --tests-pattern ^sampletests_1$'
... ' --hide-secondary-failures'
... ).split()
- >>> testrunner.run(defaults) # doctest:
+ >>> testrunner.run_internal(defaults) # doctest:
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -479,7 +484,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
x = y
NameError: name 'y' is not defined
<BLANKLINE>
@@ -497,7 +502,7 @@
... 'test --tests-pattern ^sampletests_1$'
... ' --hide-secondary-failures --show-secondary-failures'
... ).split()
- >>> testrunner.run(defaults) # doctest: +NORMALIZE_WHITESPACE
+ >>> testrunner.run_internal(defaults) # doctest: +NORMALIZE_WHITESPACE
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -515,7 +520,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
x = y
NameError: name 'y' is not defined
----------------------------------------------------------------------
@@ -527,7 +532,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[1]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 4, example 1]>", line 1, in ?
x
NameError: name 'x' is not defined
----------------------------------------------------------------------
@@ -539,7 +544,7 @@
Traceback (most recent call last):
File ".../doctest.py", line 1256, in __run
compileflags, 1) in test.globs
- File "<doctest sample2.sampletests_1.eek[2]>", line 1, in ?
+ File "<doctest sample2.sampletests_1.eek[line 7, example 2]>", line 1, in ?
z = x + 1
NameError: name 'x' is not defined
<BLANKLINE>
@@ -558,7 +563,7 @@
kinds.
>>> sys.argv = 'test --tests-pattern ^pledge$'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -598,7 +603,7 @@
caret marking the mismatching column positions.
>>> sys.argv = 'test --tests-pattern ^pledge$ --ndiff'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -626,7 +631,7 @@
The -udiff option requests a standard "unified" diff:
>>> sys.argv = 'test --tests-pattern ^pledge$ --udiff'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -653,7 +658,7 @@
The -cdiff option requests a standard "context" diff:
>>> sys.argv = 'test --tests-pattern ^pledge$ --cdiff'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -702,12 +707,13 @@
>>> sys.argv = ('test --tests-pattern ^sampletests(f|_i)?$ --layer 1 '
... ).split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
... # doctest: +NORMALIZE_WHITESPACE
Test-module import failures:
<BLANKLINE>
Module: sample2.sampletests_i
<BLANKLINE>
+ Traceback (most recent call last):
File "testrunner-ex/sample2/sampletests_i.py", line 1
importx unittest
^
@@ -722,11 +728,6 @@
ImportError: No module named huh
<BLANKLINE>
<BLANKLINE>
- Module: sample2.sample22.sampletests_i
- <BLANKLINE>
- AttributeError: 'module' object has no attribute 'test_suite'
- <BLANKLINE>
- <BLANKLINE>
Module: sample2.sample23.sampletests_i
<BLANKLINE>
Traceback (most recent call last):
@@ -772,7 +773,6 @@
Test-modules with import problems:
sample2.sampletests_i
sample2.sample21.sampletests_i
- sample2.sample22.sampletests_i
sample2.sample23.sampletests_i
Total: 213 tests, 0 failures, 0 errors in N.NNN seconds.
True
@@ -785,7 +785,7 @@
unicode and another not:
>>> sys.argv = 'test --tests-pattern ^unicode$ -u'.split()
- >>> testrunner.run(defaults) # doctest: +REPORT_NDIFF
+ >>> testrunner.run_internal(defaults) # doctest: +REPORT_NDIFF
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
<BLANKLINE>
@@ -816,18 +816,19 @@
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
True
-
+
Reporting Errors to Calling Processes
-------------------------------------
-The testrunner can return an error status, indicating that the tests
+The testrunner returns the error status, indicating that the tests
failed. This can be useful for an invoking process that wants to
monitor the result of a test run.
-To use, specify the argument "--exit-with-status".
+This is applied when invoking the testrunner using the ``run()`` function
+instead of ``run_internal()``:
>>> sys.argv = (
- ... 'test --exit-with-status --tests-pattern ^sampletests_1$'.split())
+ ... 'test --tests-pattern ^sampletests_1$'.split())
>>> try:
... testrunner.run(defaults)
... except SystemExit, e:
@@ -843,14 +844,14 @@
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
exited with code 1
-A passing test does not exit.
+Passing tests exit with code 0 according to UNIX practices:
>>> sys.argv = (
- ... 'test --exit-with-status --tests-pattern ^sampletests$'.split())
+ ... 'test --tests-pattern ^sampletests$'.split())
>>> try:
... testrunner.run(defaults)
... except SystemExit, e2:
- ... print 'oops'
+ ... print 'exited with code', e2.code
... else:
... print 'sys.exit was not called'
... # doctest: +ELLIPSIS
@@ -865,8 +866,7 @@
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Total: 364 tests, 0 failures, 0 errors in N.NNN seconds.
- ...
- sys.exit was not called
+ exited with code 0
And remove the temporary directory:
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover.py (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover.py)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover.py (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,5 @@
+import unittest
+
+class TestA(unittest.TestCase):
+ def test_truth(self):
+ self.assert_(True)
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover_notests.py (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover_notests.py)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover_notests.py (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample1/sampletests_discover_notests.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,2 @@
+def test_function_that_would_never_be_run():
+ self.assert_(True)
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_1.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_1.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_1.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -26,4 +26,4 @@
"""
def test_suite():
- return doctest.DocTestSuite()
+ return doctest.DocTestSuite(optionflags=doctest.REPORT_NDIFF)
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -21,6 +21,7 @@
def g():
x = 1
x = x + 1
+ __traceback_info__ = "I don't know what Y should be."
x = y + 1
x = x + 1
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sampletests_buffering.py (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/testrunner-ex/sampletests_buffering.py)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sampletests_buffering.py (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-ex/sampletests_buffering.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,70 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Sample tests with sleep and layers that can't be torn down
+
+$Id$
+"""
+
+import unittest, time
+
+class Layer1:
+
+ def setUp(self):
+ pass
+ setUp = classmethod(setUp)
+
+ def tearDown(self):
+ raise NotImplementedError
+ tearDown = classmethod(tearDown)
+
+
+class Layer2:
+
+ def setUp(self):
+ pass
+ setUp = classmethod(setUp)
+
+ def tearDown(self):
+ raise NotImplementedError
+ tearDown = classmethod(tearDown)
+
+
+class TestSomething1(unittest.TestCase):
+
+ layer = Layer1
+
+ def test_something(self):
+ pass
+
+
+class TestSomething2(unittest.TestCase):
+
+ layer = Layer2
+
+ def test_something(self):
+ time.sleep(0.5)
+
+ def test_something2(self):
+ time.sleep(0.5)
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(TestSomething1))
+ suite.addTest(unittest.makeSuite(TestSomething2))
+ return suite
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-gc.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-gc.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-gc.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -17,7 +17,7 @@
>>> from zope.testing import testrunner
>>> sys.argv = 'test --tests-pattern ^gc0$ --gc 0 -vv'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Cyclic garbage collection is disabled.
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -34,7 +34,7 @@
by providing a low threshold:
>>> sys.argv = 'test --tests-pattern ^gc1$ --gc 1 -vv'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Cyclic garbage collection threshold set to: (1,)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -51,7 +51,7 @@
>>> sys.argv = ('test --tests-pattern ^gcset$ --gc 701 --gc 11 --gc 9 -vv'
... .split())
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Cyclic garbage collection threshold set to: (701, 11, 9)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
@@ -79,7 +79,7 @@
>>> sys.argv = ('test --tests-pattern ^gcstats$ -G DEBUG_STATS'
... ' -G DEBUG_COLLECTABLE -vv'
... .split())
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-knit.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-knit.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-knit.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -41,7 +41,7 @@
>>> from zope.testing import testrunner
>>> sys.argv = 'test --layer Layer111 -vv'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer111 tests:
Set up samplelayers.Layerx in 0.000 seconds.
@@ -69,7 +69,7 @@
or individual packages within knit-in packages:
>>> sys.argv = 'test --package sample4.products -vv'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer111 tests:
Set up samplelayers.Layerx in 0.000 seconds.
@@ -87,7 +87,7 @@
Tear down samplelayers.Layer1 in 0.000 seconds.
>>> sys.argv = 'test --package sample4.products.more -vv'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer111 tests:
Set up samplelayers.Layerx in 0.000 seconds.
Copied: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-buff.txt (from rev 103876, zope.testing/trunk/src/zope/testing/testrunner/testrunner-layers-buff.txt)
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-buff.txt (rev 0)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-buff.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -0,0 +1,142 @@
+This is a test for a fix in buffering of output from a layer in a subprocess.
+
+Prior to the change that this tests, output from within a test layer in a
+subprocess would be buffered. This could wreak havoc on supervising processes
+(or human) that would kill a test run if no output was seen for some period of
+time.
+
+First, we wrap stdout with an object that instruments it. It notes the time at
+which a given line was written.
+
+ >>> import os, sys, datetime
+ >>> class RecordingStreamWrapper:
+ ... def __init__(self, wrapped):
+ ... self.record = []
+ ... self.wrapped = wrapped
+ ... def write(self, out):
+ ... self.record.append((out, datetime.datetime.now()))
+ ... self.wrapped.write(out)
+ ... def flush(self):
+ ... self.wrapped.flush()
+ ...
+ >>> sys.stdout = RecordingStreamWrapper(sys.stdout)
+
+Now we actually call our test. If you open the file to which we are referring
+here (zope/testing/testrunner-ex/sampletests_buffering.py) you will see two test
+suites, each with its own layer that does not know how to tear down. This
+forces the second suite to be run in a subprocess.
+
+That second suite has two tests. Both sleep for half a second each.
+
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> from zope.testing import testrunner
+ >>> defaults = [
+ ... '--path', directory_with_tests,
+ ... ]
+ >>> argv = [sys.argv[0],
+ ... '-vv', '--tests-pattern', '^sampletests_buffering.*']
+
+ >>> try:
+ ... testrunner.run_internal(defaults, argv)
+ ... record = sys.stdout.record
+ ... finally:
+ ... sys.stdout = sys.stdout.wrapped
+ ...
+ Running tests at level 1
+ Running sampletests_buffering.Layer1 tests:
+ Set up sampletests_buffering.Layer1 in N.NNN seconds.
+ Running:
+ test_something (sampletests_buffering.TestSomething1)
+ Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+ Running sampletests_buffering.Layer2 tests:
+ Tear down sampletests_buffering.Layer1 ... not supported
+ Running in a subprocess.
+ Set up sampletests_buffering.Layer2 in N.NNN seconds.
+ Running:
+ test_something (sampletests_buffering.TestSomething2)
+ test_something2 (sampletests_buffering.TestSomething2)
+ Ran 2 tests with 0 failures and 0 errors in N.NNN seconds.
+ Tear down sampletests_buffering.Layer2 ... not supported
+ Total: 3 tests, 0 failures, 0 errors in N.NNN seconds.
+ False
+
+Now we actually check the results we care about. We should see that there are
+two pauses of about half a second, one around the first test and one around the
+second. Before the change that this test verifies, there was a single pause of
+more than a second after the second suite ran.
+
+ >>> pause = datetime.timedelta(seconds=0.3)
+ >>> last_line, last_time = record.pop(0)
+ >>> for line, time in record:
+ ... if (time-last_time >= pause and
+ ... line != ' Running in a subprocess.\n'):
+ ... # We paused!
+ ... print 'PAUSE FOUND BETWEEN THESE LINES:'
+ ... print ''.join([last_line, line, '-'*70])
+ ... last_line, last_time = line, time
+ PAUSE FOUND BETWEEN THESE LINES:
+ Running:
+ test_something (sampletests_buffering.TestSomething2)
+ ----------------------------------------------------------------------
+ PAUSE FOUND BETWEEN THESE LINES:
+ test_something (sampletests_buffering.TestSomething2)
+ test_something2 (sampletests_buffering.TestSomething2)
+ ----------------------------------------------------------------------
+
+Because this is a test based on timing, it may be somewhat fragile. However,
+on a relatively slow machine, this timing works out fine; I'm hopeful that this
+test will not be a source of spurious errors. If it is, we will have to
+readdress.
+
+Now let's do the same thing, but with multiple processes at once. We'll get
+a different result that has similar characteristics.
+
+ >>> sys.stdout = RecordingStreamWrapper(sys.stdout)
+ >>> argv.extend(['-j', '2'])
+ >>> try:
+ ... testrunner.run_internal(defaults, argv)
+ ... record = sys.stdout.record
+ ... finally:
+ ... sys.stdout = sys.stdout.wrapped
+ ...
+ Running tests at level 1
+ Running sampletests_buffering.Layer1 tests:
+ Set up sampletests_buffering.Layer1 in N.NNN seconds.
+ Running:
+ test_something (sampletests_buffering.TestSomething1)
+ Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+ [Parallel tests running in sampletests_buffering.Layer2:
+ .. LAYER FINISHED]
+ Running sampletests_buffering.Layer2 tests:
+ Running in a subprocess.
+ Set up sampletests_buffering.Layer2 in N.NNN seconds.
+ Ran 2 tests with 0 failures and 0 errors in N.NNN seconds.
+ Total: 3 tests, 0 failures, 0 errors in N.NNN seconds.
+ False
+
+Notice that, with a -vv (or greater) verbosity, the parallel test run includes
+a progress report to keep track of tests run in the various layers. Because
+the actual results are saved to be displayed assembled in the original test
+order, the progress report shows up before we are given the news that the
+testrunner is starting Layer2. This is counterintuitive, but lets us keep the
+primary reporting information for the given layer in one location, while also
+giving us progress reports that can be used for keepalive analysis by a human or
+automated agent. In particular for the second point, notice below that, as
+before, the progress output is not buffered.
+
+ >>> last_line, last_time = record.pop(0)
+ >>> for line, time in record:
+ ... if (time-last_time >= pause and
+ ... line != ' Running in a subprocess.\n'):
+ ... # We paused!
+ ... print 'PAUSE FOUND BETWEEN THIS OUTPUT:'
+ ... print '\n'.join([last_line, line, '-'*70])
+ ... last_line, last_time = line, time
+ PAUSE FOUND BETWEEN THIS OUTPUT:
+ .
+ .
+ ----------------------------------------------------------------------
+ PAUSE FOUND BETWEEN THIS OUTPUT:
+ .
+ LAYER FINISHED
+ ----------------------------------------------------------------------
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-ntd.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-ntd.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers-ntd.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -14,7 +14,7 @@
... ]
>>> sys.argv = 'test -ssample2 --tests-pattern sampletests_ntd$'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running sample2.sampletests_ntd.Layer tests:
Set up sample2.sampletests_ntd.Layer in 0.000 seconds.
Ran 1 tests with 0 failures and 0 errors in 0.000 seconds.
@@ -27,7 +27,7 @@
resuming tests where it left off:
>>> sys.argv = [testrunner_script, '--tests-pattern', 'sampletests_ntd$']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running sample1.sampletests_ntd.Layer tests:
Set up sample1.sampletests_ntd.Layer in N.NNN seconds.
Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -85,7 +85,7 @@
>>> sys.argv = [testrunner_script, '--tests-pattern', 'sampletests_ntd$',
... '-D', ]
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running sample1.sampletests_ntd.Layer tests:
Set up sample1.sampletests_ntd.Layer in N.NNN seconds.
Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -159,7 +159,7 @@
that is run as a subprocess:
>>> sys.argv = [testrunner_script, '--tests-pattern', 'sampletests_ntds']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running sample1.sampletests_ntds.Layer tests:
Set up sample1.sampletests_ntds.Layer in 0.000 seconds.
Ran 1 tests with 0 failures and 0 errors in 0.000 seconds.
@@ -246,7 +246,7 @@
>>> sys.argv = [testrunner_script, '-s', 'sample2', '--tests-pattern',
... '(sampletests_ntd$|stderrtest)']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running sample2.sampletests_ntd.Layer tests:
Set up sample2.sampletests_ntd.Layer in 0.000 seconds.
Ran 1 tests with 0 failures and 0 errors in 0.000 seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-layers.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -12,7 +12,7 @@
>>> sys.argv = 'test --layer 112 --layer Unit'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer112 tests:
Set up samplelayers.Layerx in N.NNN seconds.
Set up samplelayers.Layer1 in N.NNN seconds.
@@ -35,7 +35,7 @@
We can also specify that we want to run only the unit tests:
>>> sys.argv = 'test -u'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -47,7 +47,7 @@
Or that we want to run all of the tests except for the unit tests:
>>> sys.argv = 'test -f'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in N.NNN seconds.
Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -85,7 +85,7 @@
Or we can explicitly say that we want both unit and non-unit tests.
>>> sys.argv = 'test -uf'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in N.NNN seconds.
Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -127,7 +127,7 @@
It is possible to force the layers to run in subprocesses and parallelize them.
>>> sys.argv = [testrunner_script, '-j2']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in N.NNN seconds.
Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-leaks-err.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-leaks-err.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-leaks-err.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -15,7 +15,7 @@
>>> from zope.testing import testrunner
>>> sys.argv = 'test -r -N6'.split()
- >>> _ = testrunner.run(defaults)
+ >>> _ = testrunner.run_internal(defaults)
The Python you are running was not configured
with --with-pydebug. This is required to use
the --report-refcounts option.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling-cprofiler.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling-cprofiler.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling-cprofiler.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -18,7 +18,7 @@
When the tests are run, we get profiling output::
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running...
...
ncalls tottime percall cumtime percall filename:lineno(function)...
@@ -28,7 +28,7 @@
>>> sys.argv = [testrunner_script, '-ssample2', '--profile=cProfile',
... '--tests-pattern', 'sampletests_ntd']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running...
Tear down ... not supported...
ncalls tottime percall cumtime percall filename:lineno(function)...
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-profiling.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -1,10 +1,16 @@
Profiling
=========
+The testrunner supports the hotshot and cProfile profilers. Hotshot profiler
+support does not work with Python 2.6.
+ >>> import os.path, sys
+ >>> profiler = '--profile=hotshot'
+ >>> if sys.hexversion >= 0x02060000:
+ ... profiler = '--profile=cProfile'
+
The testrunner includes the ability to profile the test execution with hotshot
-via the --profile option.
+via the --profile option, if it is a Python version older than 2.6.
- >>> import os.path, sys
>>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
>>> sys.path.append(directory_with_tests)
@@ -13,12 +19,12 @@
... '--tests-pattern', '^sampletestsf?$',
... ]
- >>> sys.argv = [testrunner_script, '--profile=hotshot']
+ >>> sys.argv = [testrunner_script, profiler]
When the tests are run, we get profiling output.
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
...
Running samplelayers.Layer11 tests:
@@ -32,9 +38,9 @@
Profiling also works across layers.
- >>> sys.argv = [testrunner_script, '-ssample2', '--profile=hotshot',
+ >>> sys.argv = [testrunner_script, '-ssample2', profiler,
... '--tests-pattern', 'sampletests_ntd']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running...
Tear down ... not supported...
ncalls tottime percall cumtime percall filename:lineno(function)...
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-progress.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-progress.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-progress.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -15,65 +15,64 @@
>>> sys.argv = 'test --layer 122 -p'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Set up samplelayers.Layer122 in 0.000 seconds.
Running:
- 1/34 (2.9%)\r
- \r
- 2/34 (5.9%)\r
- \r
- 3/34 (8.8%)\r
- \r
- 4/34 (11.8%)\r
- \r
- 5/34 (14.7%)\r
- \r
- 6/34 (17.6%)\r
- \r
- 7/34 (20.6%)\r
- \r
- 8/34 (23.5%)\r
- \r
- 9/34 (26.5%)\r
- \r
- 10/34 (29.4%)\r
- \r
- 11/34 (32.4%)\r
- \r
- 12/34 (35.3%)\r
- \r
- 17/34 (50.0%)\r
- \r
- 18/34 (52.9%)\r
- \r
- 19/34 (55.9%)\r
- \r
- 20/34 (58.8%)\r
- \r
- 21/34 (61.8%)\r
- \r
- 22/34 (64.7%)\r
- \r
- 23/34 (67.6%)\r
- \r
- 24/34 (70.6%)\r
- \r
- 25/34 (73.5%)\r
- \r
- 26/34 (76.5%)\r
- \r
- 27/34 (79.4%)\r
- \r
- 28/34 (82.4%)\r
- \r
- 29/34 (85.3%)\r
- \r
- 34/34 (100.0%)\r
- \r
- <BLANKLINE>
+ 1/34 (2.9%)##r##
+ ##r##
+ 2/34 (5.9%)##r##
+ ##r##
+ 3/34 (8.8%)##r##
+ ##r##
+ 4/34 (11.8%)##r##
+ ##r##
+ 5/34 (14.7%)##r##
+ ##r##
+ 6/34 (17.6%)##r##
+ ##r##
+ 7/34 (20.6%)##r##
+ ##r##
+ 8/34 (23.5%)##r##
+ ##r##
+ 9/34 (26.5%)##r##
+ ##r##
+ 10/34 (29.4%)##r##
+ ##r##
+ 11/34 (32.4%)##r##
+ ##r##
+ 12/34 (35.3%)##r##
+ ##r##
+ 17/34 (50.0%)##r##
+ ##r##
+ 18/34 (52.9%)##r##
+ ##r##
+ 19/34 (55.9%)##r##
+ ##r##
+ 20/34 (58.8%)##r##
+ ##r##
+ 21/34 (61.8%)##r##
+ ##r##
+ 22/34 (64.7%)##r##
+ ##r##
+ 23/34 (67.6%)##r##
+ ##r##
+ 24/34 (70.6%)##r##
+ ##r##
+ 25/34 (73.5%)##r##
+ ##r##
+ 26/34 (76.5%)##r##
+ ##r##
+ 27/34 (79.4%)##r##
+ ##r##
+ 28/34 (82.4%)##r##
+ ##r##
+ 29/34 (85.3%)##r##
+ ##r##
+ 34/34 (100.0%)##r##
+ ##r##
Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
Tearing down left over layers:
Tear down samplelayers.Layer122 in 0.000 seconds.
@@ -81,7 +80,7 @@
Tear down samplelayers.Layer1 in 0.000 seconds.
False
-(Note that, in the examples above and below, we show "\r" followed by
+(Note that, in the examples above and below, we show "##r##" followed by
new lines where carriage returns would appear in actual output.)
Using a single level of verbosity causes test descriptions to be
@@ -89,66 +88,65 @@
width, when the terminal width can't be determined, is 80:
>>> sys.argv = 'test --layer 122 -pv'.split()
->>> testrunner.run(defaults)
+>>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Set up samplelayers.Layer122 in 0.000 seconds.
Running:
- 1/34 (2.9%) test_x1 (sample1.sampletests.test122.TestA)\r
- \r
- 2/34 (5.9%) test_y0 (sample1.sampletests.test122.TestA)\r
- \r
- 3/34 (8.8%) test_z0 (sample1.sampletests.test122.TestA)\r
- \r
- 4/34 (11.8%) test_x0 (sample1.sampletests.test122.TestB)\r
- \r
- 5/34 (14.7%) test_y1 (sample1.sampletests.test122.TestB)\r
- \r
- 6/34 (17.6%) test_z0 (sample1.sampletests.test122.TestB)\r
- \r
- 7/34 (20.6%) test_1 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 8/34 (23.5%) test_2 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 9/34 (26.5%) test_3 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 10/34 (29.4%) test_x0 (sample1.sampletests.test122)\r
- \r
- 11/34 (32.4%) test_y0 (sample1.sampletests.test122)\r
- \r
- 12/34 (35.3%) test_z1 (sample1.sampletests.test122)\r
- \r
- 17/34 (50.0%) ... /testrunner-ex/sample1/sampletests/../../sampletestsl.txt\r
- \r
- 18/34 (52.9%) test_x1 (sampletests.test122.TestA)\r
- \r
- 19/34 (55.9%) test_y0 (sampletests.test122.TestA)\r
- \r
- 20/34 (58.8%) test_z0 (sampletests.test122.TestA)\r
- \r
- 21/34 (61.8%) test_x0 (sampletests.test122.TestB)\r
- \r
- 22/34 (64.7%) test_y1 (sampletests.test122.TestB)\r
- \r
- 23/34 (67.6%) test_z0 (sampletests.test122.TestB)\r
- \r
- 24/34 (70.6%) test_1 (sampletests.test122.TestNotMuch)\r
- \r
- 25/34 (73.5%) test_2 (sampletests.test122.TestNotMuch)\r
- \r
- 26/34 (76.5%) test_3 (sampletests.test122.TestNotMuch)\r
- \r
- 27/34 (79.4%) test_x0 (sampletests.test122)\r
- \r
- 28/34 (82.4%) test_y0 (sampletests.test122)\r
- \r
- 29/34 (85.3%) test_z1 (sampletests.test122)\r
- \r
- 34/34 (100.0%) ... pe/testing/testrunner-ex/sampletests/../sampletestsl.txt\r
- \r
-<BLANKLINE>
+ 1/34 (2.9%) test_x1 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 2/34 (5.9%) test_y0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 3/34 (8.8%) test_z0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 4/34 (11.8%) test_x0 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 5/34 (14.7%) test_y1 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 6/34 (17.6%) test_z0 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 7/34 (20.6%) test_1 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 8/34 (23.5%) test_2 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 9/34 (26.5%) test_3 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 10/34 (29.4%) test_x0 (sample1.sampletests.test122)##r##
+ ##r##
+ 11/34 (32.4%) test_y0 (sample1.sampletests.test122)##r##
+ ##r##
+ 12/34 (35.3%) test_z1 (sample1.sampletests.test122)##r##
+ ##r##
+ 17/34 (50.0%) ... /testrunner-ex/sample1/sampletests/../../sampletestsl.txt##r##
+ ##r##
+ 18/34 (52.9%) test_x1 (sampletests.test122.TestA)##r##
+ ##r##
+ 19/34 (55.9%) test_y0 (sampletests.test122.TestA)##r##
+ ##r##
+ 20/34 (58.8%) test_z0 (sampletests.test122.TestA)##r##
+ ##r##
+ 21/34 (61.8%) test_x0 (sampletests.test122.TestB)##r##
+ ##r##
+ 22/34 (64.7%) test_y1 (sampletests.test122.TestB)##r##
+ ##r##
+ 23/34 (67.6%) test_z0 (sampletests.test122.TestB)##r##
+ ##r##
+ 24/34 (70.6%) test_1 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 25/34 (73.5%) test_2 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 26/34 (76.5%) test_3 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 27/34 (79.4%) test_x0 (sampletests.test122)##r##
+ ##r##
+ 28/34 (82.4%) test_y0 (sampletests.test122)##r##
+ ##r##
+ 29/34 (85.3%) test_z1 (sampletests.test122)##r##
+ ##r##
+ 34/34 (100.0%) ... pe/testing/testrunner-ex/sampletests/../sampletestsl.txt##r##
+ ##r##
Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
Tearing down left over layers:
Tear down samplelayers.Layer122 in 0.000 seconds.
@@ -166,66 +164,65 @@
... return 60
>>> old_curses = sys.modules.get('curses')
>>> sys.modules['curses'] = FakeCurses()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Set up samplelayers.Layer122 in 0.000 seconds.
Running:
- 1/34 (2.9%) test_x1 (sample1.sampletests.test122.TestA)\r
- \r
- 2/34 (5.9%) test_y0 (sample1.sampletests.test122.TestA)\r
- \r
- 3/34 (8.8%) test_z0 (sample1.sampletests.test122.TestA)\r
- \r
- 4/34 (11.8%) test_x0 (...le1.sampletests.test122.TestB)\r
- \r
- 5/34 (14.7%) test_y1 (...le1.sampletests.test122.TestB)\r
- \r
- 6/34 (17.6%) test_z0 (...le1.sampletests.test122.TestB)\r
- \r
- 7/34 (20.6%) test_1 (...ampletests.test122.TestNotMuch)\r
- \r
- 8/34 (23.5%) test_2 (...ampletests.test122.TestNotMuch)\r
- \r
- 9/34 (26.5%) test_3 (...ampletests.test122.TestNotMuch)\r
- \r
- 10/34 (29.4%) test_x0 (sample1.sampletests.test122)\r
- \r
- 11/34 (32.4%) test_y0 (sample1.sampletests.test122)\r
- \r
- 12/34 (35.3%) test_z1 (sample1.sampletests.test122)\r
- \r
- 17/34 (50.0%) ... e1/sampletests/../../sampletestsl.txt\r
- \r
- 18/34 (52.9%) test_x1 (sampletests.test122.TestA)\r
- \r
- 19/34 (55.9%) test_y0 (sampletests.test122.TestA)\r
- \r
- 20/34 (58.8%) test_z0 (sampletests.test122.TestA)\r
- \r
- 21/34 (61.8%) test_x0 (sampletests.test122.TestB)\r
- \r
- 22/34 (64.7%) test_y1 (sampletests.test122.TestB)\r
- \r
- 23/34 (67.6%) test_z0 (sampletests.test122.TestB)\r
- \r
- 24/34 (70.6%) test_1 (sampletests.test122.TestNotMuch)\r
- \r
- 25/34 (73.5%) test_2 (sampletests.test122.TestNotMuch)\r
- \r
- 26/34 (76.5%) test_3 (sampletests.test122.TestNotMuch)\r
- \r
- 27/34 (79.4%) test_x0 (sampletests.test122)\r
- \r
- 28/34 (82.4%) test_y0 (sampletests.test122)\r
- \r
- 29/34 (85.3%) test_z1 (sampletests.test122)\r
- \r
- 34/34 (100.0%) ... r-ex/sampletests/../sampletestsl.txt\r
- \r
- <BLANKLINE>
+ 1/34 (2.9%) test_x1 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 2/34 (5.9%) test_y0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 3/34 (8.8%) test_z0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 4/34 (11.8%) test_x0 (...le1.sampletests.test122.TestB)##r##
+ ##r##
+ 5/34 (14.7%) test_y1 (...le1.sampletests.test122.TestB)##r##
+ ##r##
+ 6/34 (17.6%) test_z0 (...le1.sampletests.test122.TestB)##r##
+ ##r##
+ 7/34 (20.6%) test_1 (...ampletests.test122.TestNotMuch)##r##
+ ##r##
+ 8/34 (23.5%) test_2 (...ampletests.test122.TestNotMuch)##r##
+ ##r##
+ 9/34 (26.5%) test_3 (...ampletests.test122.TestNotMuch)##r##
+ ##r##
+ 10/34 (29.4%) test_x0 (sample1.sampletests.test122)##r##
+ ##r##
+ 11/34 (32.4%) test_y0 (sample1.sampletests.test122)##r##
+ ##r##
+ 12/34 (35.3%) test_z1 (sample1.sampletests.test122)##r##
+ ##r##
+ 17/34 (50.0%) ... e1/sampletests/../../sampletestsl.txt##r##
+ ##r##
+ 18/34 (52.9%) test_x1 (sampletests.test122.TestA)##r##
+ ##r##
+ 19/34 (55.9%) test_y0 (sampletests.test122.TestA)##r##
+ ##r##
+ 20/34 (58.8%) test_z0 (sampletests.test122.TestA)##r##
+ ##r##
+ 21/34 (61.8%) test_x0 (sampletests.test122.TestB)##r##
+ ##r##
+ 22/34 (64.7%) test_y1 (sampletests.test122.TestB)##r##
+ ##r##
+ 23/34 (67.6%) test_z0 (sampletests.test122.TestB)##r##
+ ##r##
+ 24/34 (70.6%) test_1 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 25/34 (73.5%) test_2 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 26/34 (76.5%) test_3 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 27/34 (79.4%) test_x0 (sampletests.test122)##r##
+ ##r##
+ 28/34 (82.4%) test_y0 (sampletests.test122)##r##
+ ##r##
+ 29/34 (85.3%) test_z1 (sampletests.test122)##r##
+ ##r##
+ 34/34 (100.0%) ... r-ex/sampletests/../sampletestsl.txt##r##
+ ##r##
Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
Tearing down left over layers:
Tear down samplelayers.Layer122 in 0.000 seconds.
@@ -239,62 +236,61 @@
information.
>>> sys.argv = 'test --layer 122 -pvv -t !txt'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Set up samplelayers.Layer122 in 0.000 seconds.
Running:
- 1/24 (4.2%) test_x1 (sample1.sampletests.test122.TestA)\r
- \r
- 2/24 (8.3%) test_y0 (sample1.sampletests.test122.TestA)\r
- \r
- 3/24 (12.5%) test_z0 (sample1.sampletests.test122.TestA)\r
- \r
- 4/24 (16.7%) test_x0 (sample1.sampletests.test122.TestB)\r
- \r
- 5/24 (20.8%) test_y1 (sample1.sampletests.test122.TestB)\r
- \r
- 6/24 (25.0%) test_z0 (sample1.sampletests.test122.TestB)\r
- \r
- 7/24 (29.2%) test_1 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 8/24 (33.3%) test_2 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 9/24 (37.5%) test_3 (sample1.sampletests.test122.TestNotMuch)\r
- \r
- 10/24 (41.7%) test_x0 (sample1.sampletests.test122)\r
- \r
- 11/24 (45.8%) test_y0 (sample1.sampletests.test122)\r
- \r
- 12/24 (50.0%) test_z1 (sample1.sampletests.test122)\r
- \r
- 13/24 (54.2%) test_x1 (sampletests.test122.TestA)\r
- \r
- 14/24 (58.3%) test_y0 (sampletests.test122.TestA)\r
- \r
- 15/24 (62.5%) test_z0 (sampletests.test122.TestA)\r
- \r
- 16/24 (66.7%) test_x0 (sampletests.test122.TestB)\r
- \r
- 17/24 (70.8%) test_y1 (sampletests.test122.TestB)\r
- \r
- 18/24 (75.0%) test_z0 (sampletests.test122.TestB)\r
- \r
- 19/24 (79.2%) test_1 (sampletests.test122.TestNotMuch)\r
- \r
- 20/24 (83.3%) test_2 (sampletests.test122.TestNotMuch)\r
- \r
- 21/24 (87.5%) test_3 (sampletests.test122.TestNotMuch)\r
- \r
- 22/24 (91.7%) test_x0 (sampletests.test122)\r
- \r
- 23/24 (95.8%) test_y0 (sampletests.test122)\r
- \r
- 24/24 (100.0%) test_z1 (sampletests.test122)\r
- \r
- <BLANKLINE>
+ 1/24 (4.2%) test_x1 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 2/24 (8.3%) test_y0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 3/24 (12.5%) test_z0 (sample1.sampletests.test122.TestA)##r##
+ ##r##
+ 4/24 (16.7%) test_x0 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 5/24 (20.8%) test_y1 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 6/24 (25.0%) test_z0 (sample1.sampletests.test122.TestB)##r##
+ ##r##
+ 7/24 (29.2%) test_1 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 8/24 (33.3%) test_2 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 9/24 (37.5%) test_3 (sample1.sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 10/24 (41.7%) test_x0 (sample1.sampletests.test122)##r##
+ ##r##
+ 11/24 (45.8%) test_y0 (sample1.sampletests.test122)##r##
+ ##r##
+ 12/24 (50.0%) test_z1 (sample1.sampletests.test122)##r##
+ ##r##
+ 13/24 (54.2%) test_x1 (sampletests.test122.TestA)##r##
+ ##r##
+ 14/24 (58.3%) test_y0 (sampletests.test122.TestA)##r##
+ ##r##
+ 15/24 (62.5%) test_z0 (sampletests.test122.TestA)##r##
+ ##r##
+ 16/24 (66.7%) test_x0 (sampletests.test122.TestB)##r##
+ ##r##
+ 17/24 (70.8%) test_y1 (sampletests.test122.TestB)##r##
+ ##r##
+ 18/24 (75.0%) test_z0 (sampletests.test122.TestB)##r##
+ ##r##
+ 19/24 (79.2%) test_1 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 20/24 (83.3%) test_2 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 21/24 (87.5%) test_3 (sampletests.test122.TestNotMuch)##r##
+ ##r##
+ 22/24 (91.7%) test_x0 (sampletests.test122)##r##
+ ##r##
+ 23/24 (95.8%) test_y0 (sampletests.test122)##r##
+ ##r##
+ 24/24 (100.0%) test_z1 (sampletests.test122)##r##
+ ##r##
Ran 24 tests with 0 failures and 0 errors in 0.006 seconds.
Tearing down left over layers:
Tear down samplelayers.Layer122 in 0.000 seconds.
@@ -306,50 +302,49 @@
with '!' to exclude tests containing the string "txt".
>>> sys.argv = 'test --layer 122 -pvvv -t!(txt|NotMuch)'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
Set up samplelayers.Layer12 in 0.000 seconds.
Set up samplelayers.Layer122 in 0.000 seconds.
Running:
- 1/18 (5.6%) test_x1 (sample1.sampletests.test122.TestA) (0.000 s)\r
- \r
- 2/18 (11.1%) test_y0 (sample1.sampletests.test122.TestA) (0.000 s)\r
- \r
- 3/18 (16.7%) test_z0 (sample1.sampletests.test122.TestA) (0.000 s)\r
- \r
- 4/18 (22.2%) test_x0 (sample1.sampletests.test122.TestB) (0.000 s)\r
- \r
- 5/18 (27.8%) test_y1 (sample1.sampletests.test122.TestB) (0.000 s)\r
- \r
- 6/18 (33.3%) test_z0 (sample1.sampletests.test122.TestB) (0.000 s)\r
- \r
- 7/18 (38.9%) test_x0 (sample1.sampletests.test122) (0.001 s)\r
- \r
- 8/18 (44.4%) test_y0 (sample1.sampletests.test122) (0.001 s)\r
- \r
- 9/18 (50.0%) test_z1 (sample1.sampletests.test122) (0.001 s)\r
- \r
- 10/18 (55.6%) test_x1 (sampletests.test122.TestA) (0.000 s)\r
- \r
- 11/18 (61.1%) test_y0 (sampletests.test122.TestA) (0.000 s)\r
- \r
- 12/18 (66.7%) test_z0 (sampletests.test122.TestA) (0.000 s)\r
- \r
- 13/18 (72.2%) test_x0 (sampletests.test122.TestB) (0.000 s)\r
- \r
- 14/18 (77.8%) test_y1 (sampletests.test122.TestB) (0.000 s)\r
- \r
- 15/18 (83.3%) test_z0 (sampletests.test122.TestB) (0.000 s)\r
- \r
- 16/18 (88.9%) test_x0 (sampletests.test122) (0.001 s)\r
- \r
- 17/18 (94.4%) test_y0 (sampletests.test122) (0.001 s)\r
- \r
- 18/18 (100.0%) test_z1 (sampletests.test122) (0.001 s)\r
- \r
- <BLANKLINE>
+ 1/18 (5.6%) test_x1 (sample1.sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 2/18 (11.1%) test_y0 (sample1.sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 3/18 (16.7%) test_z0 (sample1.sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 4/18 (22.2%) test_x0 (sample1.sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 5/18 (27.8%) test_y1 (sample1.sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 6/18 (33.3%) test_z0 (sample1.sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 7/18 (38.9%) test_x0 (sample1.sampletests.test122) (0.001 s)##r##
+ ##r##
+ 8/18 (44.4%) test_y0 (sample1.sampletests.test122) (0.001 s)##r##
+ ##r##
+ 9/18 (50.0%) test_z1 (sample1.sampletests.test122) (0.001 s)##r##
+ ##r##
+ 10/18 (55.6%) test_x1 (sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 11/18 (61.1%) test_y0 (sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 12/18 (66.7%) test_z0 (sampletests.test122.TestA) (0.000 s)##r##
+ ##r##
+ 13/18 (72.2%) test_x0 (sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 14/18 (77.8%) test_y1 (sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 15/18 (83.3%) test_z0 (sampletests.test122.TestB) (0.000 s)##r##
+ ##r##
+ 16/18 (88.9%) test_x0 (sampletests.test122) (0.001 s)##r##
+ ##r##
+ 17/18 (94.4%) test_y0 (sampletests.test122) (0.001 s)##r##
+ ##r##
+ 18/18 (100.0%) test_z1 (sampletests.test122) (0.001 s)##r##
+ ##r##
Ran 18 tests with 0 failures and 0 errors in 0.006 seconds.
Tearing down left over layers:
Tear down samplelayers.Layer122 in 0.000 seconds.
@@ -383,23 +378,22 @@
>>> sys.stdout = Terminal(sys.stdout)
>>> sys.argv = 'test -u -t test_one.TestNotMuch --auto-progress'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Running:
- 1/6 (16.7%)\r
- \r
- 2/6 (33.3%)\r
- \r
- 3/6 (50.0%)\r
- \r
- 4/6 (66.7%)\r
- \r
- 5/6 (83.3%)\r
- \r
- 6/6 (100.0%)\r
- \r
- <BLANKLINE>
+ 1/6 (16.7%)##r##
+ ##r##
+ 2/6 (33.3%)##r##
+ ##r##
+ 3/6 (50.0%)##r##
+ ##r##
+ 4/6 (66.7%)##r##
+ ##r##
+ 5/6 (83.3%)##r##
+ ##r##
+ 6/6 (100.0%)##r##
+ ##r##
Ran 6 tests with 0 failures and 0 errors in N.NNN seconds.
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -411,7 +405,7 @@
>>> sys.stdout = real_stdout
>>> sys.argv = 'test -u -t test_one.TestNotMuch --auto-progress'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Ran 6 tests with 0 failures and 0 errors in N.NNN seconds.
@@ -428,13 +422,10 @@
--no-progress:
>>> sys.argv = 'test -u -t test_one.TestNotMuch -p --no-progress'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Ran 6 tests with 0 failures and 0 errors in N.NNN seconds.
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
False
-
-
-
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-repeat.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-repeat.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-repeat.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -14,7 +14,7 @@
>>> sys.argv = 'test --layer 112 --layer UnitTests --repeat 3'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer112 tests:
Set up samplelayers.Layerx in 0.000 seconds.
Set up samplelayers.Layer1 in 0.000 seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-simple.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-simple.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-simple.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -46,7 +46,7 @@
>>> from zope.testing import testrunner
>>> import sys
>>> sys.argv = ['test']
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running samplelayers.Layer1 tests:
Set up samplelayers.Layer1 in N.NNN seconds.
Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-test-selection.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-test-selection.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-test-selection.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -13,7 +13,7 @@
>>> sys.argv = 'test --layer 122 -ssample1 -vv'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -43,7 +43,7 @@
You can specify multiple packages:
>>> sys.argv = 'test -u -vv -ssample1 -ssample2'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -163,7 +163,7 @@
>>> subdir = os.path.join(directory_with_tests, 'sample1')
>>> sys.argv = ('test --layer 122 -s %s -vv' % subdir).split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -193,7 +193,7 @@
We can select by test module name using the --module (-m) option:
>>> sys.argv = 'test -u -vv -ssample1 -m_one -mtest1'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -233,7 +233,7 @@
and by test within the module using the --test (-t) option:
>>> sys.argv = 'test -u -vv -ssample1 -m_one -mtest1 -tx0 -ty0'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -253,7 +253,7 @@
>>> sys.argv = 'test -u -vv -ssample1 -ttxt'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -274,7 +274,7 @@
match the regular expression are selected:
>>> sys.argv = 'test -u -vv -ssample1 -m!sample1[.]sample1'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -328,7 +328,7 @@
>>> sys.argv = 'test -u -vv -ssample1 !sample1[.]sample1'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -379,7 +379,7 @@
>>> sys.argv = 'test -u -vv -ssample1 . txt'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -403,7 +403,7 @@
>>> sys.argv = 'test -u -vv -t test_y1 -t test_y0'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -454,7 +454,7 @@
additional tests:
>>> sys.argv = 'test -u -vv -a 2 -t test_y1 -t test_y0'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 2
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -506,7 +506,7 @@
We can use the --all option to run tests at all levels:
>>> sys.argv = 'test -u -vv --all -t test_y1 -t test_y0'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at all levels
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -564,7 +564,7 @@
specifications.
>>> sys.argv = 'test --all -m sample1 -t test_y0 --list-tests'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Listing samplelayers.Layer11 tests:
test_y0 (sample1.sampletests.test11.TestA)
test_y0 (sample1.sampletests.test11)
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-verbose.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-verbose.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-verbose.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -15,7 +15,7 @@
... ]
>>> sys.argv = 'test --layer 122 -v'.split()
>>> from zope.testing import testrunner
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -34,7 +34,7 @@
50:
>>> sys.argv = 'test -uv'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -49,7 +49,7 @@
each test is printed as it is run:
>>> sys.argv = 'test --layer 122 -vv'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -93,7 +93,7 @@
test-execution times are printed:
>>> sys.argv = 'test --layer 122 -vvv'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running tests at level 1
Running samplelayers.Layer122 tests:
Set up samplelayers.Layer1 in 0.000 seconds.
@@ -145,7 +145,7 @@
... '-v'
... ]
>>> sys.argv = 'test -q -u'.split()
- >>> testrunner.run(defaults)
+ >>> testrunner.run_internal(defaults)
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Ran 192 tests with 0 failures and 0 errors in 0.034 seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-wo-source.txt
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-wo-source.txt 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/testrunner-wo-source.txt 2009-09-13 08:21:57 UTC (rev 103878)
@@ -42,7 +42,7 @@
... '-vv',
... ]
>>> sys.argv = ['test']
- >>> testrunner.run(mydefaults)
+ >>> testrunner.run_internal(mydefaults)
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
@@ -66,7 +66,7 @@
... for f in files:
... if f.endswith(".py"):
... os.remove(os.path.join(root, f))
- >>> testrunner.run(mydefaults, ["test", "--keepbytecode"])
+ >>> testrunner.run_internal(mydefaults, ["test", "--keepbytecode"])
Running tests at level 1
Total: 0 tests, 0 failures, 0 errors in N.NNN seconds.
False
@@ -77,7 +77,7 @@
of "removing stale bytecode ..." messages shows that ``--usecompiled``
also implies ``--keepbytecode``:
- >>> testrunner.run(mydefaults, ["test", "--usecompiled"])
+ >>> testrunner.run_internal(mydefaults, ["test", "--usecompiled"])
Running tests at level 1
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
Modified: zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tests.py
===================================================================
--- zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tests.py 2009-09-12 17:57:34 UTC (rev 103877)
+++ zope.testing/branches/regebro-python3/src/zope/testing/testrunner/tests.py 2009-09-13 08:21:57 UTC (rev 103878)
@@ -26,45 +26,103 @@
from zope.testing import renormalizing
-checker = renormalizing.RENormalizing([
- # 2.5 changed the way pdb reports exceptions
- (re.compile(r"<class 'exceptions.(\w+)Error'>:"),
- r'exceptions.\1Error:'),
+#separate checkers for the different platforms,
+#because it is too painful to maintain just one
+if sys.platform == 'win32':
+ checker = renormalizing.RENormalizing([
+ # 2.5 changed the way pdb reports exceptions
+ (re.compile(r"<class 'exceptions.(\w+)Error'>:"),
+ r'exceptions.\1Error:'),
- (re.compile('^> [^\n]+->None$', re.M), '> ...->None'),
- (re.compile(r"<module>"),(r'?')),
- (re.compile(r"<type 'exceptions.(\w+)Error'>:"),
- r'exceptions.\1Error:'),
- (re.compile("'[A-Za-z]:\\\\"), "'"), # hopefully, we'll make Windows happy
- (re.compile(r'\\\\'), '/'), # more Windows happiness
- (re.compile(r'\\'), '/'), # even more Windows happiness
- (re.compile('/r'), '\\\\r'), # undo damage from previous
- (re.compile(r'\r'), '\\\\r\n'),
- (re.compile(r'\d+[.]\d\d\d seconds'), 'N.NNN seconds'),
- (re.compile(r'\d+[.]\d\d\d s'), 'N.NNN s'),
- (re.compile(r'\d+[.]\d\d\d{'), 'N.NNN{'),
- (re.compile('( |")[^\n]+testrunner-ex'), r'\1testrunner-ex'),
- (re.compile('( |")[^\n]+testrunner.py'), r'\1testrunner.py'),
- (re.compile(r'> [^\n]*(doc|unit)test[.]py\(\d+\)'),
- r'\1test.py(NNN)'),
- (re.compile(r'[.]py\(\d+\)'), r'.py(NNN)'),
- (re.compile(r'[.]py:\d+'), r'.py:NNN'),
- (re.compile(r' line \d+,', re.IGNORECASE), r' Line NNN,'),
- (re.compile(r' line {([a-z]+)}\d+{', re.IGNORECASE), r' Line {\1}NNN{'),
+ #rewrite the pdb prompt, eliding the current location;
+ #on Windows, the py2.4 pdb seems not to put the '>' on doctest
+ #locations, therefore we cut it here
+ (re.compile('^> doctest[^\n]+->None$', re.M), '...->None'),
- # omit traceback entries for unittest.py or doctest.py from
- # output:
- (re.compile(r'^ +File "[^\n]*(doc|unit)test.py", [^\n]+\n[^\n]+\n',
- re.MULTILINE),
- r''),
- (re.compile(r'^{\w+} +File "{\w+}[^\n]*(doc|unit)test.py{\w+}", [^\n]+\n[^\n]+\n',
- re.MULTILINE),
- r''),
- (re.compile('^> [^\n]+->None$', re.M), '> ...->None'),
- (re.compile('import pdb; pdb'), 'Pdb()'), # Py 2.3
- ])
+ #rewrite pdb prompt to ... the current location
+ (re.compile('^> [^\n]+->None$', re.M), '> ...->None'),
+ (re.compile(r"<module>"),(r'?')),
+ (re.compile(r"<type 'exceptions.(\w+)Error'>:"),
+ r'exceptions.\1Error:'),
+ (re.compile("'[A-Za-z]:\\\\"), "'"), # hopefully, we'll make Windows happy
+ # replaces drives with nothing
+
+ (re.compile(r'\\\\'), '/'), # more Windows happiness
+ # double backslashes in coverage???
+
+ (re.compile(r'\\'), '/'), # even more Windows happiness
+ # replaces backslashes in paths
+
+ #this is a trick to insert linefeeds into the doctest
+ (re.compile('##r##\n'), '\r'),
+
+ (re.compile(r'\d+[.]\d\d\d seconds'), 'N.NNN seconds'),
+ (re.compile(r'\d+[.]\d\d\d s'), 'N.NNN s'),
+ (re.compile(r'\d+[.]\d\d\d{'), 'N.NNN{'),
+ (re.compile('( |")[^\n]+testrunner-ex'), r'\1testrunner-ex'),
+ (re.compile('( |")[^\n]+testrunner.py'), r'\1testrunner.py'),
+ (re.compile(r'> [^\n]*(doc|unit)test[.]py\(\d+\)'),
+ r'\1test.py(NNN)'),
+ (re.compile(r'[.]py\(\d+\)'), r'.py(NNN)'),
+ (re.compile(r'[.]py:\d+'), r'.py:NNN'),
+ (re.compile(r' line \d+,', re.IGNORECASE), r' Line NNN,'),
+ (re.compile(r' line {([a-z]+)}\d+{', re.IGNORECASE), r' Line {\1}NNN{'),
+
+ # omit traceback entries for unittest.py or doctest.py from
+ # output:
+ (re.compile(r'^ +File "[^\n]*(doc|unit)test.py", [^\n]+\n[^\n]+\n',
+ re.MULTILINE),
+ r''),
+ (re.compile(r'^{\w+} +File "{\w+}[^\n]*(doc|unit)test.py{\w+}", [^\n]+\n[^\n]+\n',
+ re.MULTILINE),
+ r''),
+ #(re.compile('^> [^\n]+->None$', re.M), '> ...->None'),
+ (re.compile('import pdb; pdb'), 'Pdb()'), # Py 2.3
+ ])
+else:
+ #*nix
+ checker = renormalizing.RENormalizing([
+ # 2.5 changed the way pdb reports exceptions
+ (re.compile(r"<class 'exceptions.(\w+)Error'>:"),
+ r'exceptions.\1Error:'),
+
+ #rewrite pdb prompt to ... the current location
+ (re.compile('^> [^\n]+->None$', re.M), '> ...->None'),
+
+ (re.compile(r"<module>"),(r'?')),
+ (re.compile(r"<type 'exceptions.(\w+)Error'>:"),
+ r'exceptions.\1Error:'),
+
+ #this is a trick to insert linefeeds into the doctest;
+ #on Windows it takes one step, but Linux needs both substitutions
+ #below for the same effect
+ (re.compile('##r##'), r'\r'),
+ (re.compile(r'\r'), '\\\\r\n'),
+
+ (re.compile(r'\d+[.]\d\d\d seconds'), 'N.NNN seconds'),
+ (re.compile(r'\d+[.]\d\d\d s'), 'N.NNN s'),
+ (re.compile(r'\d+[.]\d\d\d{'), 'N.NNN{'),
+ (re.compile('( |")[^\n]+testrunner-ex'), r'\1testrunner-ex'),
+ (re.compile('( |")[^\n]+testrunner.py'), r'\1testrunner.py'),
+ (re.compile(r'> [^\n]*(doc|unit)test[.]py\(\d+\)'),
+ r'\1test.py(NNN)'),
+ (re.compile(r'[.]py\(\d+\)'), r'.py(NNN)'),
+ (re.compile(r'[.]py:\d+'), r'.py:NNN'),
+ (re.compile(r' line \d+,', re.IGNORECASE), r' Line NNN,'),
+ (re.compile(r' line {([a-z]+)}\d+{', re.IGNORECASE), r' Line {\1}NNN{'),
+
+ # omit traceback entries for unittest.py or doctest.py from
+ # output:
+ (re.compile(r'^ +File "[^\n]*(doc|unit)test.py", [^\n]+\n[^\n]+\n',
+ re.MULTILINE),
+ r''),
+ (re.compile(r'^{\w+} +File "{\w+}[^\n]*(doc|unit)test.py{\w+}", [^\n]+\n[^\n]+\n',
+ re.MULTILINE),
+ r''),
+ (re.compile('import pdb; pdb'), 'Pdb()'), # Py 2.3
+ ])
+
def setUp(test):
test.globs['saved-sys-info'] = (
sys.path[:],
@@ -92,6 +150,7 @@
'testrunner-debugging.txt',
'testrunner-edge-cases.txt',
'testrunner-errors.txt',
+ 'testrunner-layers-buff.txt',
'testrunner-layers-ntd.txt',
'testrunner-layers.txt',
'testrunner-layers-api.txt',
@@ -108,7 +167,8 @@
optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE,
checker=checker),
doctest.DocTestSuite('zope.testing.testrunner'),
- doctest.DocTestSuite('zope.testing.testrunner.coverage'),
+ doctest.DocTestSuite('zope.testing.testrunner.coverage',
+ optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE),
doctest.DocTestSuite('zope.testing.testrunner.options'),
doctest.DocTestSuite('zope.testing.testrunner.find'),
]
More information about the checkins
mailing list