"""
Tests of regrtest.py.

Note: test_regrtest cannot be run twice in parallel.
"""
from __future__ import print_function

import collections
import errno
import os.path
import platform
import re
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import unittest
from test import support


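# sys.getobjects() only exists in debug builds of Python (Py_TRACE_REFS is
# enabled by --with-pydebug), so use it to detect a debug interpreter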
Py_DEBUG = hasattr(sys, 'getobjects')
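# Root of the Python source tree, two levels above Lib/test/; used below to
# locate the Windows helper scripts Tools\buildbot\test.bat and PCbuild\rt.bat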
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))

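# Code of a test that interrupts itself: raise SIGINT in the current process,
# using _testcapi.raise_signal() when available and falling back to os.kill(),
# to simulate a user pressing CTRL+C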
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT
    try:
        from _testcapi import raise_signal
        raise_signal(SIGINT)
    except ImportError:
        import os
        os.kill(os.getpid(), SIGINT)
    """)


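# Result of BaseTestCase.run_command(): exit status plus the captured stdout
# and stderr of the child process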
SubprocessRun = collections.namedtuple('SubprocessRun', 'returncode stdout stderr')


class BaseTestCase(unittest.TestCase):
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(support.rmtree, self.tmptestdir)

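    # Write a synthetic test module named test_regrtest_<name> into
    # self.tmptestdir and return its module name; the tests below point
    # regrtest at that directory with --testdir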
    def create_test(self, name=None, code=''):
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(support.unlink, path)
        # Use O_EXCL to ensure that we do not overwrite existing tests
        try:
            fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        except OSError as exc:
            # Python 2 has no PermissionError: check errno explicitly
            if (exc.errno in (errno.EACCES, errno.EPERM)
                    and not sysconfig.is_python_build()):
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        else:
            with os.fdopen(fd, 'w') as fp:
                fp.write(code)
        return name

    def regex_search(self, regex, output):
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex):
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegexpMatches(output, regex)

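    # regrtest prints one progress line per test, such as
    # "[ 1/ 4] test_regrtest_noop1"; extract the test names from those lines,
    # in execution order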
    def parse_executed_tests(self, output):
        regex = (r'^\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % self.TESTNAME_REGEX)
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

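    # Check which tests were executed and how regrtest summarized them: the
    # skipped/failed/omitted lists, the final "All N tests OK." line, and the
    # SIGINT message when interrupted=True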
    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             omitted=(), randomize=False, interrupted=False):
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(omitted, str):
            omitted = [omitted]
        ntest = len(tests)
        nskipped = len(skipped)
        nfailed = len(failed)
        nomitted = len(omitted)

        executed = self.parse_executed_tests(output)
        if randomize:
            self.assertEqual(set(executed), set(tests), output)
        else:
            self.assertEqual(executed, tests, (executed, tests, output))

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        good = ntest - nskipped - nfailed - nomitted
        if good:
            regex = r'%s test%s OK\.$' % (good, plural(good))
            if not skipped and not failed and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

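    # When tests run in random order, regrtest prints "Using random seed N";
    # extract N so a run can be reproduced with --randseed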
    def parse_random_seed(self, output):
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

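    # Run a command, capture its stdout/stderr as text, and fail the test with
    # the full output if the exit code is not the expected one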
    def run_command(self, args, input=None, exitcode=0, **kw):
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.PIPE
        proc = subprocess.Popen(args,
                                universal_newlines=True,
                                stdout=subprocess.PIPE,
                                **kw)
        stdout, stderr = proc.communicate(input=input)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % stderr)
            self.fail(msg)
        return SubprocessRun(proc.returncode, stdout, stderr)

    def run_python(self, args, **kw):
        args = [sys.executable] + list(args)
        proc = self.run_command(args, **kw)
        return proc.stdout


class ProgramsTestCase(BaseTestCase):
    """
    Test various ways to run the Python test suite. Use options close
    to those used on the buildbots.
    """

    NTEST = 4

    def setUp(self):
        super(ProgramsTestCase, self).setUp()

        # Create NTEST tests doing nothing
        self.tests = [self.create_test() for index in range(self.NTEST)]

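        # Interpreter options: -Wd enables default warnings, -E ignores
        # PYTHON* environment variables, -bb turns bytearray/unicode
        # comparison warnings into errors.
        # regrtest options: -u all enables all resources, -r randomizes the
        # test order, -w re-runs failed tests in verbose mode and -W displays
        # test output on failure.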
        self.python_args = ['-Wd', '-E', '-bb']
        self.regrtest_args = ['-uall', '-rwW',
                              '--testdir=%s' % self.tmptestdir]

    def check_output(self, output):
        self.parse_random_seed(output)
        self.check_executed_tests(output, self.tests, randomize=True)

    def run_tests(self, args):
        output = self.run_python(args)
        self.check_output(output)

    def test_script_regrtest(self):
        # Lib/test/regrtest.py
        script = os.path.join(self.testdir, 'regrtest.py')

        args = self.python_args + [script] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_test(self):
        # -m test
        args = self.python_args + ['-m', 'test'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_regrtest(self):
        # -m test.regrtest
        args = self.python_args + ['-m', 'test.regrtest'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_autotest(self):
        # -m test.autotest
        args = self.python_args + ['-m', 'test.autotest'] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_module_from_test_autotest(self):
        # from test import autotest
        code = 'from test import autotest'
        args = self.python_args + ['-c', code] + self.regrtest_args + self.tests
        self.run_tests(args)

    def test_script_autotest(self):
        # Lib/test/autotest.py
        script = os.path.join(self.testdir, 'autotest.py')
        args = self.python_args + [script] + self.regrtest_args + self.tests
        self.run_tests(args)

    def run_batch(self, *args):
        proc = self.run_command(args)
        self.check_output(proc.stdout)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'test.bat script is not installed')
    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_tools_buildbot_test(self):
        # Tools\buildbot\test.bat
        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
        test_args = ['--testdir=%s' % self.tmptestdir]
        if platform.architecture()[0] == '64bit':
            test_args.append('-x64')   # 64-bit build
        if not Py_DEBUG:
            test_args.append('+d')     # Release build, use python.exe

        args = [script] + test_args + self.tests
        self.run_batch(*args)

    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_pcbuild_rt(self):
        # PCbuild\rt.bat
        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
        rt_args = ["-q"]   # Quick, don't run tests twice
        if platform.architecture()[0] == '64bit':
            rt_args.append('-x64')   # 64-bit build
        if Py_DEBUG:
            rt_args.append('-d')     # Debug build, use python_d.exe
        args = [script] + rt_args + self.regrtest_args + self.tests
        self.run_batch(*args)


class ArgsTestCase(BaseTestCase):
    """
    Test arguments of the Python test suite.
    """

    def run_tests(self, *testargs, **kw):
        cmdargs = ('-m', 'test', '--testdir=%s' % self.tmptestdir) + testargs
        return self.run_python(cmdargs, **kw)

    def test_failing_test(self):
        # test a failing test
        code = textwrap.dedent("""
            import unittest
            from test import support

            class FailingTest(unittest.TestCase):
                def test_failing(self):
                    self.fail("bug")

            def test_main():
                support.run_unittest(FailingTest)
        """)
        test_ok = self.create_test('ok')
        test_failing = self.create_test('failing', code=code)
        tests = [test_ok, test_failing]

        output = self.run_tests(*tests, exitcode=1)
        self.check_executed_tests(output, tests, failed=test_failing)

    def test_resources(self):
        # test -u command line option
        tests = {}
        for resource in ('audio', 'network'):
            code = 'from test import support\nsupport.requires(%r)' % resource
            tests[resource] = self.create_test(resource, code)
        test_names = sorted(tests.values())

        # -u all: 2 resources enabled
        output = self.run_tests('-u', 'all', *test_names)
        self.check_executed_tests(output, test_names)

        # -u audio: 1 resource enabled
        output = self.run_tests('-uaudio', *test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=tests['network'])

        # no option: 0 resources enabled
        output = self.run_tests(*test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=test_names)

    def test_random(self):
        # test -r and --randseed command line options
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', '-v', test)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the same random seed
        output = self.run_tests('-r', '-v', '--randseed=%s' % randseed, test)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

    def test_fromfile(self):
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = support.TESTFN
        self.addCleanup(support.unlink, filename)

        # test format 'test_opcodes'
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

    def test_interrupted(self):
        code = TEST_INTERRUPTED
        test = self.create_test('sigint', code=code)
        output = self.run_tests(test, exitcode=1)
        self.check_executed_tests(output, test, omitted=test,
                                  interrupted=True)

    def test_slow(self):
        # test --slow
        tests = [self.create_test() for index in range(3)]
        output = self.run_tests("--slow", *tests)
        self.check_executed_tests(output, tests)
        regex = ('10 slowest tests:\n'
                 '(?:%s: .*\n){%s}'
                 % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, regex)

    def test_forever(self):
        # test --forever
        code = textwrap.dedent("""
            import __builtin__
            import unittest
            from test import support

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the __builtin__ module, because the test
                    # module is reloaded at each run
                    if 'RUN' in __builtin__.__dict__:
                        __builtin__.__dict__['RUN'] += 1
                        if __builtin__.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd run")
                    else:
                        __builtin__.__dict__['RUN'] = 1

            def test_main():
                support.run_unittest(ForeverTester)
        """)
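        # --forever re-runs the test in a loop until it fails: the module
        # above fails on its 3rd run, so the test is expected to appear three
        # times in the output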
        test = self.create_test('forever', code=code)
        output = self.run_tests('--forever', test, exitcode=1)
        self.check_executed_tests(output, [test]*3, failed=test)

    def test_crashed(self):
        # Any code which causes a crash
        code = 'import ctypes; ctypes.string_at(0)'
        crash_test = self.create_test(name="crash", code=code)
        ok_test = self.create_test(name="ok")

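        # Run with two worker processes (-j2) so that the crash only kills a
        # child process and regrtest itself can report the crashed test as
        # failed; completion order is not deterministic with -j, hence
        # randomize=True below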
        tests = [crash_test, ok_test]
        output = self.run_tests("-j2", *tests, exitcode=1)
        self.check_executed_tests(output, tests, failed=crash_test,
                                  randomize=True)


def test_main():
    support.run_unittest(ProgramsTestCase, ArgsTestCase)


if __name__ == "__main__":
    test_main()