[2.7] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master (#2444)
* bpo-30523: regrtest --list-cases --match (#2401)
* regrtest --list-cases now supports --match and --matchfile options.
Example: ./python -m test --list-cases -m FileTests test_os
* --list-cases now also sets support.verbose to False to prevent
messages from being written to stdout while test modules are loaded.
* Add support._match_test() private function; the matching logic is
sketched below.
(cherry picked from commit ace56d583664f855d89d1219ece7c21c2fddcf30)
(cherry picked from commit 36946c06a3bdb1104c53f0fe535086572ffbdd9e)
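For context, the matching behind --match / --matchfile works roughly as in
the following minimal sketch using fnmatch. The helper name match_test_id
is invented for illustration; this is not the actual body of
support._match_test().

import fnmatch

def match_test_id(test_id, patterns):
    # Accept a test if any pattern matches the full dotted test id
    # (e.g. "test.test_os.FileTests.test_access") or one of its dotted
    # components (e.g. "FileTests"). No patterns means "run everything".
    if not patterns:
        return True
    for pattern in patterns:
        if fnmatch.fnmatchcase(test_id, pattern):
            return True
        for part in test_id.split("."):
            if fnmatch.fnmatchcase(part, pattern):
                return True
    return False

print(match_test_id("test.test_os.FileTests.test_access", ["FileTests"]))  # True
print(match_test_id("test.test_os.WalkTests.test_walk", ["FileTests"]))    # False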
* bpo-30764: regrtest: add --fail-env-changed option (#2402)
* bpo-30764: regrtest: change the exit code on failure
* Exit code 2 if tests failed ("bad")
* Exit code 130 if the test run was interrupted
* bpo-30764: regrtest: add --fail-env-changed option
If the option is set, mark a test as failed if it alters the
environment, for example if it creates a file without removing it;
such a run exits with code 3 (see the exit-code sketch below).
(cherry picked from commit 63f54c68936d648c70ca411661e4208329edcf26)
(cherry picked from commit 1f33857a3677de84bdc6d80c39f577eb081ed85d)
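For illustration, a caller such as a CI script can now tell the outcomes
apart by exit code alone. A minimal sketch follows; the subprocess wrapper
is invented here, while the exit codes, result strings and option name come
from the patch below.

import subprocess
import sys

# Run the test suite; --fail-env-changed turns "test altered the
# environment" into a failure with its own exit code.
code = subprocess.call([sys.executable, "-m", "test.regrtest",
                        "--fail-env-changed"])
if code == 0:
    print("SUCCESS")
elif code == 2:
    print("FAILURE: some tests failed")
elif code == 3:
    print("ENV CHANGED: a test altered the execution environment")
elif code == 130:
    print("INTERRUPTED")
else:
    print("unexpected exit code: %s" % code)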
* bpo-30776: reduce regrtest -R false positives (#2422)
* Change the checker used by regrtest --huntrleaks to decide whether a
test file leaks: a leak is now only reported if every run leaks at
least 1 reference (see the sketch after this block).
* Warmup runs are now completely ignored: they are excluded from the
checker test and no longer used to compute the sum.
* Add a unit test for a reference leak.
Examples of reference count differences previously reported as a
failure (leak) and now treated as a success (no leak):
[3, 0, 0]
[0, 1, 0]
[8, -8, 1]
(cherry picked from commit 48b5c422ffb03affb00c184b9a99e5537be92732)
(cherry picked from commit e0f8b43a46850aa5e5992465cab684496f102fe5)
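To illustrate the new rule, here is a standalone sketch of the check; the
helper name leaked() is invented and this is not the regrtest code itself.

def leaked(deltas):
    # Report a leak only if every measured run gained at least one
    # reference; warmup runs are excluded from deltas.
    return all(delta >= 1 for delta in deltas)

# False positives from the examples above: no longer reported as leaks.
for deltas in ([3, 0, 0], [0, 1, 0], [8, -8, 1]):
    print("%s -> %s" % (deltas, leaked(deltas)))   # False

# Genuine leaks, as listed in the patch comment, are still reported.
for deltas in ([5, 5, 6], [10, 1, 1]):
    print("%s -> %s" % (deltas, leaked(deltas)))   # True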
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 3bc3fa6..aadbf3b 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -67,6 +67,8 @@
don't execute them
--list-cases -- only write the name of test cases that will be run,
don't execute them
+--fail-env-changed -- if a test file alters the environment, mark the test
+ as failed
Additional Option Details:
@@ -327,7 +329,7 @@
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo',
'failfast', 'match=', 'testdir=', 'list-tests', 'list-cases',
- 'coverage', 'matchfile='])
+ 'coverage', 'matchfile=', 'fail-env-changed'])
except getopt.error, msg:
usage(2, msg)
@@ -339,6 +341,7 @@
slaveargs = None
list_tests = False
list_cases_opt = False
+ fail_env_changed = False
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
@@ -439,6 +442,8 @@
list_tests = True
elif o == '--list-cases':
list_cases_opt = True
+ elif o == '--fail-env-changed':
+ fail_env_changed = True
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
@@ -558,7 +563,7 @@
sys.exit(0)
if list_cases_opt:
- list_cases(testdir, selected)
+ list_cases(testdir, selected, match_tests)
sys.exit(0)
if trace:
@@ -908,11 +913,19 @@
result = "FAILURE"
elif interrupted:
result = "INTERRUPTED"
+ elif environment_changed and fail_env_changed:
+ result = "ENV CHANGED"
else:
result = "SUCCESS"
print("Tests result: %s" % result)
- sys.exit(len(bad) > 0 or interrupted)
+ if bad:
+ sys.exit(2)
+ if interrupted:
+ sys.exit(130)
+ if fail_env_changed and environment_changed:
+ sys.exit(3)
+ sys.exit(0)
STDTESTS = [
@@ -1310,7 +1323,18 @@
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print >> sys.stderr
- if any(deltas):
+
+ # bpo-30776: Try to ignore false positives:
+ #
+ # [3, 0, 0]
+ # [0, 1, 0]
+ # [8, -8, 1]
+ #
+ # Expected leaks:
+ #
+ # [5, 5, 6]
+ # [10, 1, 1]
+ if all(delta >= 1 for delta in deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
with open(fname, "a") as refrep:
@@ -1501,9 +1525,13 @@
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
- print(test.id())
+ if test_support._match_test(test):
+ print(test.id())
-def list_cases(testdir, selected):
+def list_cases(testdir, selected, match_tests):
+ test_support.verbose = False
+ test_support.match_tests = match_tests
+
skipped = []
for test in selected:
abstest = get_abs_module(testdir, test)