Enable pull request test filtering
diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py
index e2027a2..b7ebe20 100644
--- a/tools/run_tests/filter_pull_request_tests.py
+++ b/tools/run_tests/filter_pull_request_tests.py
@@ -77,6 +77,7 @@
# and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests
+# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_WHITELIST_DICT = {
'^doc/': [],
'^examples/': [],
@@ -174,9 +175,13 @@
print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch)
changed_files = _get_changed_files(base_branch)
for changed_file in changed_files:
- print(changed_file)
+ print(" %s" % changed_file)
print
+ # TODO(mattkwong): Remove this
+ # Faking changed files to test test filtering on Jenkins
+ changed_files = ['src/node/something', 'src/python/something']
+
# Regex that combines all keys in _WHITELIST_DICT
all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
# Check if all tests have to be run
@@ -188,7 +193,7 @@
for test_suite in _ALL_TEST_SUITES:
if _can_skip_tests(changed_files, test_suite.triggers):
for label in test_suite.labels:
- print(" Filtering %s tests" % label)
+ print(" %s tests safe to skip" % label)
skippable_labels.append(label)
tests = _remove_irrelevant_tests(tests, skippable_labels)
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index b6fb631..b84eb3b 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -96,6 +96,7 @@
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
+ 'cyan': [ 36, 0 ]
}
@@ -114,6 +115,7 @@
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
+ 'SKIPPED': 'cyan'
}
@@ -450,7 +452,16 @@
travis=False,
infinite_runs=False,
stop_on_failure=False,
- add_env={}):
+ add_env={},
+ skip_jobs=False):
+ if skip_jobs:
+ results = {}
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ for job in cmdlines:
+ message('SKIPPED', job.shortname, do_newline=True)
+ results[job.shortname] = [skipped_job_result]
+ return results
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env)
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 3e18f36..90055e3 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -74,6 +74,8 @@
ET.SubElement(xml_test, 'failure', message='Failure')
elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout')
+ elif result.state == 'SKIPPED':
+ ET.SubElement(xml_test, 'skipped', message='Skipped')
tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8')
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 2656f1a..ae8cb85 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -292,7 +292,21 @@
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
-print
+skipped_jobs = []
+if args.filter_pr_tests:
+ print 'Looking for irrelevant tests to skip...'
+ relevant_jobs = filter_tests(jobs, args.base_branch)
+ print
+ if len(relevant_jobs) == len(jobs):
+ print 'No tests will be skipped.'
+ else:
+ print 'These tests will be skipped:'
+ skipped_jobs = set(jobs) - set(relevant_jobs)
+ for job in list(skipped_jobs):
+ print ' %s' % job.shortname
+ jobs = relevant_jobs
+ print
+
print 'Will run these tests:'
for job in jobs:
if args.dry_run:
@@ -301,19 +315,6 @@
print ' %s' % job.shortname
print
-if args.filter_pr_tests:
- print 'IMPORTANT: Test filtering is not active; this is only for testing.'
- relevant_jobs = filter_tests(jobs, args.base_branch)
- # todo(mattkwong): add skipped tests to report.xml
- print
- if len(relevant_jobs) == len(jobs):
- print '(TESTING) No tests will be skipped.'
- else:
- print '(TESTING) These tests will be skipped:'
- for job in list(set(jobs) - set(relevant_jobs)):
- print ' %s' % job.shortname
- print
-
if args.dry_run:
print '--dry_run was used, exiting'
sys.exit(1)
@@ -323,9 +324,15 @@
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
+# Merge skipped tests into the resultset so they appear in report.xml
+if skipped_jobs:
+ skipped_results = jobset.run(skipped_jobs,
+ skip_jobs=True)
+ resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset, 'report.xml',
suite_name='aggregate_tests')
+
if num_failures == 0:
jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',
do_newline=True)