"""
Tests of regrtest.py.

Note: test_regrtest cannot be run twice in parallel.
"""
from __future__ import print_function

import collections
import errno
import os.path
import platform
import re
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import unittest
from test import support


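# sys.getobjects() is only available in debug (--with-pydebug) builds, so its
# presence is used here to detect a Py_DEBUG interpreter.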
Py_DEBUG = hasattr(sys, 'getobjects')
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))

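# Test code that interrupts itself with SIGINT, used to check how regrtest
# reports a test run killed by KeyboardInterrupt.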
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT
    try:
        from _testcapi import raise_signal
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)


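# Result of BaseTestCase.run_command(): exit status plus captured output.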
SubprocessRun = collections.namedtuple('SubprocessRun',
                                       'returncode stdout stderr')


class BaseTestCase(unittest.TestCase):
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(support.rmtree, self.tmptestdir)

    def create_test(self, name=None, code=''):
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(support.unlink, path)
        # Use O_EXCL to ensure that we do not overwrite existing tests
        try:
            fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        except OSError as exc:
            if (exc.errno in (errno.EACCES, errno.EPERM)
                    and not sysconfig.is_python_build()):
                self.skipTest("cannot write %s: %s" % (path, exc))
            else:
                raise
        else:
            with os.fdopen(fd, 'w') as fp:
                fp.write(code)
        return name

    def regex_search(self, regex, output):
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex):
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegexpMatches(output, regex)

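    # regrtest prints one progress line per test, in the form
    # "0:00:03 [ 2/ 4] test_name"; collect the test names from those lines.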
    def parse_executed_tests(self, output):
        regex = (r'^[0-9]+:[0-9]+:[0-9]+ \[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % self.TESTNAME_REGEX)
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             omitted=(), randomize=False, interrupted=False):
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(omitted, str):
            omitted = [omitted]
        ntest = len(tests)
        nskipped = len(skipped)
        nfailed = len(failed)
        nomitted = len(omitted)

        executed = self.parse_executed_tests(output)
        if randomize:
            self.assertEqual(set(executed), set(tests), output)
        else:
            self.assertEqual(executed, tests, (executed, tests, output))

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        good = ntest - nskipped - nfailed - nomitted
        if good:
            regex = r'%s test%s OK\.$' % (good, plural(good))
            if not skipped and not failed and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

        if nfailed:
            result = 'FAILURE'
        elif interrupted:
            result = 'INTERRUPTED'
        else:
            result = 'SUCCESS'
        self.check_line(output, 'Tests result: %s' % result)

    def parse_random_seed(self, output):
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

    def run_command(self, args, input=None, exitcode=0, **kw):
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.PIPE
        proc = subprocess.Popen(args,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                **kw)
        stdout, stderr = proc.communicate(input=input)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % stderr)
            self.fail(msg)
        return SubprocessRun(proc.returncode, stdout, stderr)

    def run_python(self, args, **kw):
        args = [sys.executable] + list(args)
        proc = self.run_command(args, **kw)
        return proc.stdout


class ProgramsTestCase(BaseTestCase):
    """
    Test various ways to run the Python test suite. Use options close
    to those used on the buildbot.
    """

    NTEST = 4

    def setUp(self):
        super(ProgramsTestCase, self).setUp()

        # Create NTEST tests doing nothing
        self.tests = [self.create_test() for index in range(self.NTEST)]

        self.python_args = ['-Wd', '-3', '-E', '-bb', '-tt']
        self.regrtest_args = ['-uall', '-rwW',
                              '--testdir=%s' % self.tmptestdir]

    def check_output(self, output):
        self.parse_random_seed(output)
        self.check_executed_tests(output, self.tests, randomize=True)

    def run_tests(self, args):
        output = self.run_python(args)
        self.check_output(output)

    def test_script_regrtest(self):
        # Lib/test/regrtest.py
        script = os.path.join(self.testdir, 'regrtest.py')

        args = self.python_args + [script] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_test(self):
        # -m test
        args = self.python_args + ['-m', 'test'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_regrtest(self):
        # -m test.regrtest
        args = self.python_args + ['-m', 'test.regrtest'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_autotest(self):
        # -m test.autotest
        args = self.python_args + ['-m', 'test.autotest'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_from_test_autotest(self):
        # from test import autotest
        code = 'from test import autotest'
        args = self.python_args + ['-c', code] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_script_autotest(self):
        # Lib/test/autotest.py
        script = os.path.join(self.testdir, 'autotest.py')
        args = self.python_args + [script] + self.regrtest_args + self.tests
        self.run_tests(args)

    def run_batch(self, *args):
        proc = self.run_command(args)
        self.check_output(proc.stdout)

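    # Tools/buildbot/test.bat and PCbuild/rt.bat are exercised only when the
    # interpreter runs from a Windows PCbuild directory; need_pcbuild() skips
    # the test otherwise.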
    def need_pcbuild(self):
        exe = os.path.normpath(os.path.abspath(sys.executable))
        parts = exe.split(os.path.sep)
        if len(parts) < 3:
            # it's not a python build, python is likely to be installed
            return

        build_dir = parts[-3]
        if build_dir.lower() != 'pcbuild':
            self.skipTest("Tools/buildbot/test.bat requires PCbuild build, "
                          "found %s" % build_dir)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'test.bat script is not installed')
    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_tools_buildbot_test(self):
        self.need_pcbuild()

        # Tools\buildbot\test.bat
        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
        test_args = ['--testdir=%s' % self.tmptestdir]
        if platform.architecture()[0] == '64bit':
            test_args.append('-x64')   # 64-bit build
        if not Py_DEBUG:
            test_args.append('+d')     # Release build, use python.exe

        args = [script] + test_args + self.tests
        self.run_batch(*args)

    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_pcbuild_rt(self):
        self.need_pcbuild()

        # PCbuild\rt.bat
        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
        rt_args = ["-q"]   # Quick, don't run tests twice
        if platform.architecture()[0] == '64bit':
            rt_args.append('-x64')   # 64-bit build
        if Py_DEBUG:
            rt_args.append('-d')     # Debug build, use python_d.exe
        args = [script] + rt_args + self.regrtest_args + self.tests
        self.run_batch(*args)


class ArgsTestCase(BaseTestCase):
    """
    Test arguments of the Python test suite.
    """

    def run_tests(self, *testargs, **kw):
        cmdargs = ('-m', 'test', '--testdir=%s' % self.tmptestdir) + testargs
        return self.run_python(cmdargs, **kw)

    def test_failing_test(self):
        # test a failing test
        code = textwrap.dedent("""
            import unittest
            from test import support

            class FailingTest(unittest.TestCase):
                def test_failing(self):
                    self.fail("bug")

            def test_main():
                support.run_unittest(FailingTest)
        """)
        test_ok = self.create_test('ok')
        test_failing = self.create_test('failing', code=code)
        tests = [test_ok, test_failing]

        output = self.run_tests(*tests, exitcode=1)
        self.check_executed_tests(output, tests, failed=test_failing)

    def test_resources(self):
        # test -u command line option
        tests = {}
        for resource in ('audio', 'network'):
            code = 'from test import support\nsupport.requires(%r)' % resource
            tests[resource] = self.create_test(resource, code)
        test_names = sorted(tests.values())

        # -u all: 2 resources enabled
        output = self.run_tests('-u', 'all', *test_names)
        self.check_executed_tests(output, test_names)

        # -u audio: 1 resource enabled
        output = self.run_tests('-uaudio', *test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=tests['network'])

        # no option: 0 resources enabled
        output = self.run_tests(*test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=test_names)

    def test_random(self):
        # test -r and --randseed command line options
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', '-v', test)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the random seed
        output = self.run_tests('-r', '-v', '--randseed=%s' % randseed, test)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

    def test_fromfile(self):
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of test names to a file, one name per line
        # (the 'test_opcodes' format accepted by --fromfile)
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

    def test_interrupted(self):
        code = TEST_INTERRUPTED
        test = self.create_test('sigint', code=code)
        output = self.run_tests(test, exitcode=1)
        self.check_executed_tests(output, test, omitted=test,
                                  interrupted=True)

    def test_slowest(self):
        # test --slowest
        tests = [self.create_test() for index in range(3)]
        output = self.run_tests("--slowest", *tests)
        self.check_executed_tests(output, tests)
        regex = ('10 slowest tests:\n'
                 '(?:- %s: .*\n){%s}'
                 % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, regex)

    def test_slow_interrupted(self):
        # Issue #25373: test --slowest with an interrupted test
        code = TEST_INTERRUPTED
        test = self.create_test("sigint", code=code)

        try:
            import threading
            tests = (False, True)
        except ImportError:
            tests = (False,)
        for multiprocessing in tests:
            if multiprocessing:
                args = ("--slowest", "-j2", test)
            else:
                args = ("--slowest", test)
            output = self.run_tests(*args, exitcode=1)
            self.check_executed_tests(output, test,
                                      omitted=test, interrupted=True)

            regex = ('10 slowest tests:\n')
            self.check_line(output, regex)

    def test_coverage(self):
        # test --coverage
        test = self.create_test('coverage')
        output = self.run_tests("--coverage", test)
        self.check_executed_tests(output, [test])
        regex = (r'lines +cov% +module +\(path\)\n'
                 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
        self.check_line(output, regex)

    def test_forever(self):
        # test --forever
        code = textwrap.dedent("""
            import __builtin__
            import unittest
            from test import support

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the __builtin__ module, because the
                    # test module is reloaded at each run
                    if 'RUN' in __builtin__.__dict__:
                        __builtin__.__dict__['RUN'] += 1
                        if __builtin__.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd run")
                    else:
                        __builtin__.__dict__['RUN'] = 1

            def test_main():
                support.run_unittest(ForeverTester)
        """)
        test = self.create_test('forever', code=code)
        output = self.run_tests('--forever', test, exitcode=1)
        self.check_executed_tests(output, [test]*3, failed=test)

    def test_list_tests(self):
        # test --list-tests
        tests = [self.create_test() for i in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(),
                         tests)

    def test_crashed(self):
        # Any code which causes a crash
        code = 'import ctypes; ctypes.string_at(0)'
        crash_test = self.create_test(name="crash", code=code)
        ok_test = self.create_test(name="ok")

        tests = [crash_test, ok_test]
        output = self.run_tests("-j2", *tests, exitcode=1)
        self.check_executed_tests(output, tests, failed=crash_test,
                                  randomize=True)

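    # Collect the names of the test methods reported as "ok" in verbose (-v)
    # output.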
    def parse_methods(self, output):
        regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
        return [match.group(1) for match in regex.finditer(output)]

    def test_matchfile(self):
        # test --matchfile
        code = textwrap.dedent("""
            import unittest
            from test import support

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass

            def test_main():
                support.run_unittest(Tests)
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        methods = self.parse_methods(output)
        self.assertEqual(methods, all_methods)

        # only run a subset
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        subset = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--matchfile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method1', 'test_method3']
        self.assertEqual(methods, subset)

    def test_list_cases(self):
        # test --list-cases
        code = textwrap.dedent("""
            import unittest
            from test import support

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass

            def test_main():
                support.run_unittest(Tests)
        """)
        testname = self.create_test(code=code)
        all_methods = ['%s.Tests.test_method1' % testname,
                       '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), all_methods)


def test_main():
    support.run_unittest(ProgramsTestCase, ArgsTestCase)


if __name__ == "__main__":
    test_main()