Upgrade yapf to 0.20.0
Upgrade yapf to 0.20.0 and reformat the Python files under tools/run_tests/ accordingly.
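
Most of the churn below comes from a single behavior change: where the old version packed as many elements as fit within the column limit, yapf 0.20.0 splits an over-long argument list or list literal with one element per line. A minimal sketch of observing the new behavior through yapf's public FormatCode API (assumptions: yapf==0.20.0 is installed, and the 'pep8' style name is illustrative rather than this repo's actual style configuration):

    # A minimal sketch, assuming yapf==0.20.0 is installed; FormatCode is
    # yapf's public reformatting entry point.
    from yapf.yapflib.yapf_api import FormatCode

    # A packed list literal, mirroring the pre-upgrade formatting of the
    # artifact lists touched by this commit. FormatCode only parses the
    # source, so the constructor names need not be importable.
    source = ("targets = [PythonArtifact('linux', 'x86', 'cp27-cp27m'), "
              "PythonArtifact('linux', 'x86', 'cp27-cp27mu'), "
              "RubyArtifact('linux', 'x64')]\n")

    result = FormatCode(source, style_config='pep8')
    # Depending on the yapf version, FormatCode returns either the formatted
    # string or a (formatted, changed) tuple; accept both.
    formatted = result[0] if isinstance(result, tuple) else result
    print(formatted)

Under 0.20.0 the list comes back with one constructor call per line, matching the '+' side of the hunks below; the tree-wide reformat itself amounts to running yapf recursively in place (yapf --in-place --recursive <dirs>) with the repo's style settings.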
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index dc0803b..efc4ca0 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -271,8 +271,8 @@
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
- self.name,
- 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+ self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
+ self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
else:
return create_jobspec(
@@ -337,36 +337,38 @@
for Cls in (CSharpExtArtifact, ProtocArtifact)
for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
] + [
- PythonArtifact('linux', 'x86', 'cp27-cp27m'), PythonArtifact(
- 'linux', 'x86', 'cp27-cp27mu'), PythonArtifact(
- 'linux', 'x86', 'cp34-cp34m'), PythonArtifact(
- 'linux', 'x86', 'cp35-cp35m'), PythonArtifact(
- 'linux', 'x86', 'cp36-cp36m'), PythonArtifact(
- 'linux_extra', 'armv7', '2.7'), PythonArtifact(
- 'linux_extra', 'armv7', '3.4'), PythonArtifact(
- 'linux_extra', 'armv7', '3.5'),
- PythonArtifact('linux_extra', 'armv7', '3.6'), PythonArtifact(
- 'linux_extra', 'armv6', '2.7'), PythonArtifact(
- 'linux_extra', 'armv6', '3.4'), PythonArtifact(
- 'linux_extra', 'armv6', '3.5'), PythonArtifact(
- 'linux_extra', 'armv6', '3.6'), PythonArtifact(
- 'linux', 'x64', 'cp27-cp27m'), PythonArtifact(
- 'linux', 'x64', 'cp27-cp27mu'), PythonArtifact(
- 'linux', 'x64', 'cp34-cp34m'),
- PythonArtifact('linux', 'x64', 'cp35-cp35m'), PythonArtifact(
- 'linux', 'x64', 'cp36-cp36m'), PythonArtifact(
- 'macos', 'x64', 'python2.7'), PythonArtifact(
- 'macos', 'x64', 'python3.4'), PythonArtifact('macos', 'x64',
- 'python3.5'),
- PythonArtifact('macos', 'x64', 'python3.6'), PythonArtifact(
- 'windows', 'x86', 'Python27_32bits'), PythonArtifact(
- 'windows', 'x86', 'Python34_32bits'), PythonArtifact(
- 'windows', 'x86', 'Python35_32bits'), PythonArtifact(
- 'windows', 'x86', 'Python36_32bits'), PythonArtifact(
- 'windows', 'x64', 'Python27'),
- PythonArtifact('windows', 'x64', 'Python34'), PythonArtifact(
- 'windows', 'x64', 'Python35'), PythonArtifact(
- 'windows', 'x64', 'Python36'), RubyArtifact(
- 'linux', 'x64'), RubyArtifact('macos', 'x64'), PHPArtifact(
- 'linux', 'x64'), PHPArtifact('macos', 'x64')
+ PythonArtifact('linux', 'x86', 'cp27-cp27m'),
+ PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
+ PythonArtifact('linux', 'x86', 'cp34-cp34m'),
+ PythonArtifact('linux', 'x86', 'cp35-cp35m'),
+ PythonArtifact('linux', 'x86', 'cp36-cp36m'),
+ PythonArtifact('linux_extra', 'armv7', '2.7'),
+ PythonArtifact('linux_extra', 'armv7', '3.4'),
+ PythonArtifact('linux_extra', 'armv7', '3.5'),
+ PythonArtifact('linux_extra', 'armv7', '3.6'),
+ PythonArtifact('linux_extra', 'armv6', '2.7'),
+ PythonArtifact('linux_extra', 'armv6', '3.4'),
+ PythonArtifact('linux_extra', 'armv6', '3.5'),
+ PythonArtifact('linux_extra', 'armv6', '3.6'),
+ PythonArtifact('linux', 'x64', 'cp27-cp27m'),
+ PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
+ PythonArtifact('linux', 'x64', 'cp34-cp34m'),
+ PythonArtifact('linux', 'x64', 'cp35-cp35m'),
+ PythonArtifact('linux', 'x64', 'cp36-cp36m'),
+ PythonArtifact('macos', 'x64', 'python2.7'),
+ PythonArtifact('macos', 'x64', 'python3.4'),
+ PythonArtifact('macos', 'x64', 'python3.5'),
+ PythonArtifact('macos', 'x64', 'python3.6'),
+ PythonArtifact('windows', 'x86', 'Python27_32bits'),
+ PythonArtifact('windows', 'x86', 'Python34_32bits'),
+ PythonArtifact('windows', 'x86', 'Python35_32bits'),
+ PythonArtifact('windows', 'x86', 'Python36_32bits'),
+ PythonArtifact('windows', 'x64', 'Python27'),
+ PythonArtifact('windows', 'x64', 'Python34'),
+ PythonArtifact('windows', 'x64', 'Python35'),
+ PythonArtifact('windows', 'x64', 'Python36'),
+ RubyArtifact('linux', 'x64'),
+ RubyArtifact('macos', 'x64'),
+ PHPArtifact('linux', 'x64'),
+ PHPArtifact('macos', 'x64')
])
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index 94a2d53..b2cc16a 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -106,8 +106,8 @@
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
- 'tools/dockerfile/distribtest/csharp_%s_%s' % (
- self.docker_suffix, self.arch),
+ 'tools/dockerfile/distribtest/csharp_%s_%s' %
+ (self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
@@ -260,8 +260,8 @@
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
- self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' % (
- self.docker_suffix, self.arch),
+ self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' %
+ (self.docker_suffix, self.arch),
'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
elif self.platform == 'windows':
return create_jobspec(
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index 5290845..abf1b5e 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -152,6 +152,9 @@
def targets():
"""Gets list of supported targets"""
return [
- CSharpPackage(), CSharpPackage(linux=True), RubyPackage(),
- PythonPackage(), PHPPackage()
+ CSharpPackage(),
+ CSharpPackage(linux=True),
+ RubyPackage(),
+ PythonPackage(),
+ PHPPackage()
]
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 37f6e7a..790202c 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -18,8 +18,7 @@
def massage_qps_stats(scenario_result):
- for stats in scenario_result["serverStats"] + scenario_result[
- "clientStats"]:
+ for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
if "coreStats" not in stats: return
core_stats = stats["coreStats"]
del stats["coreStats"]
@@ -294,8 +293,8 @@
core_stats, "cq_ev_queue_transient_pop_failures")
h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_call_initial_size_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_call_initial_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats[
"core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
@@ -307,8 +306,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
- stats["core_poll_events_returned"] = ",".join("%f" % x
- for x in h.buckets)
+ stats["core_poll_events_returned"] = ",".join(
+ "%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
@@ -322,8 +321,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_size_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_tcp_write_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
@@ -333,8 +332,8 @@
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_iov_size")
stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_tcp_write_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats[
"core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
@@ -346,8 +345,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_size_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_tcp_read_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
@@ -356,8 +355,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
- stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_tcp_read_offer_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
@@ -366,8 +365,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer_iov_size")
- stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x
- for x in h.buckets)
+ stats["core_tcp_read_offer_iov_size"] = ",".join(
+ "%f" % x for x in h.buckets)
stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
@@ -381,8 +380,8 @@
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_size")
- stats["core_http2_send_message_size"] = ",".join("%f" % x
- for x in h.buckets)
+ stats["core_http2_send_message_size"] = ",".join(
+ "%f" % x for x in h.buckets)
stats["core_http2_send_message_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
@@ -457,8 +456,8 @@
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
- stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x
- for x in h.boundaries)
+ stats["core_server_cqs_checked_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 7af33f9..f057531 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -178,8 +178,8 @@
# clamp buffer usage to something reasonable (16 gig for now)
MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
- outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size,
- resp_size))
+ outstanding_calls = max(1,
+ MAX_MEMORY_USE / max(req_size, resp_size))
wide = channels if channels is not None else WIDE
deep = int(math.ceil(1.0 * outstanding_calls / wide))
@@ -503,8 +503,8 @@
]:
for synchronicity in ['sync', 'async']:
yield _ping_pong_scenario(
- 'cpp_protobuf_%s_%s_ping_pong_%s' %
- (synchronicity, rpc_type, secstr),
+ 'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity,
+ rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
@@ -580,10 +580,10 @@
minimal_stack=not secure,
categories=[SWEEP])
- for channels in geometric_progression(1, 20000,
- math.sqrt(10)):
- for outstanding in geometric_progression(1, 200000,
- math.sqrt(10)):
+ for channels in geometric_progression(
+ 1, 20000, math.sqrt(10)):
+ for outstanding in geometric_progression(
+ 1, 200000, math.sqrt(10)):
if synchronicity == 'sync' and outstanding > 1200:
continue
if outstanding < channels: continue
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index d2941c0..2d22dc1 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -50,8 +50,8 @@
return int(output.split(':', 2)[1])
except subprocess.CalledProcessError as e:
pass
- raise Exception('Failed to get exposed port %s for container %s.' %
- (port, cid))
+ raise Exception('Failed to get exposed port %s for container %s.' % (port,
+ cid))
def wait_for_healthy(cid, shortname, timeout_seconds):
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index 8e0dc70..4c09b34 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -124,10 +124,10 @@
"""
# Get file changes between branch and merge-base of specified branch
# Not combined to be Windows friendly
- base_commit = check_output(
- ["git", "merge-base", base_branch, "HEAD"]).rstrip()
- return check_output(
- ["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
+ base_commit = check_output(["git", "merge-base", base_branch,
+ "HEAD"]).rstrip()
+ return check_output(["git", "diff", base_commit, "--name-only",
+ "HEAD"]).splitlines()
def _can_skip_tests(file_names, triggers):
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 454d09b..6a33913 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -133,12 +133,13 @@
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
- sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
- _BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
- if explanatory_text is not None else '',
- _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
- tag, msg, '\n'
- if do_newline or explanatory_text is not None else ''))
+ sys.stdout.write(
+ '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
+ (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
+ if explanatory_text is not None else '',
+ _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+ tag, msg, '\n'
+ if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
return
except IOError, e:
@@ -210,8 +211,8 @@
def __str__(self):
return '%s: %s %s' % (self.shortname, ' '.join(
- '%s=%s' % kv
- for kv in self.environ.items()), ' '.join(self.cmdline))
+ '%s=%s' % kv for kv in self.environ.items()),
+ ' '.join(self.cmdline))
class JobResult(object):
@@ -284,8 +285,9 @@
self._process = try_start()
break
except OSError:
- message('WARNING', 'Failed to start %s, retrying in %f seconds'
- % (self._spec.shortname, delay))
+ message('WARNING',
+ 'Failed to start %s, retrying in %f seconds' %
+ (self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
@@ -343,8 +345,8 @@
if real > 0.5:
cores = (user + sys) / real
self.result.cpu_measured = float('%.01f' % cores)
- self.result.cpu_estimated = float('%.01f' %
- self._spec.cpu_cost)
+ self.result.cpu_estimated = float(
+ '%.01f' % self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
self.result.cpu_measured, self.result.cpu_estimated)
if not self._quiet_success:
@@ -378,8 +380,8 @@
else:
message(
'TIMEOUT',
- '%s [pid=%d, time=%.1fsec]' %
- (self._spec.shortname, self._process.pid, elapsed),
+ '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname,
+ self._process.pid, elapsed),
stdout(),
do_newline=True)
self.kill()
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 5572cdc..37995ac 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -43,16 +43,16 @@
if running:
current_version = int(
subprocess.check_output([
- sys.executable, os.path.abspath(
- 'tools/run_tests/python_utils/port_server.py'),
+ sys.executable,
+ os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'dump_version'
]))
logging.info('my port server is version %d', current_version)
running = (version >= current_version)
if not running:
logging.info('port_server version mismatch: killing the old one')
- urllib.urlopen('http://localhost:%d/quitquitquit' %
- _PORT_SERVER_PORT).read()
+ urllib.urlopen(
+ 'http://localhost:%d/quitquitquit' % _PORT_SERVER_PORT).read()
time.sleep(1)
if not running:
fd, logfile = tempfile.mkstemp()
@@ -61,7 +61,8 @@
args = [
sys.executable,
os.path.abspath('tools/run_tests/python_utils/port_server.py'),
- '-p', '%d' % _PORT_SERVER_PORT, '-l', logfile
+ '-p',
+ '%d' % _PORT_SERVER_PORT, '-l', logfile
]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -91,8 +92,8 @@
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
- urllib.urlopen('http://localhost:%d/get' %
- _PORT_SERVER_PORT).read()
+ urllib.urlopen(
+ 'http://localhost:%d/get' % _PORT_SERVER_PORT).read()
logging.info(
'last ditch attempt to contact port server succeeded')
break
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 4af00a4..4055332 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -61,7 +61,7 @@
}
_URL_BASE = 'https://grpc-testing.appspot.com/job'
-# This is a dynamic list where known and active issues should be added.
+# This is a dynamic list where known and active issues should be added.
# Fixed ones should be removed.
# Also try not to add multiple messages from the same failure.
_KNOWN_ERRORS = [
@@ -106,8 +106,8 @@
'description': known_error,
'count': this_error_count
})
- print('====> %d failures due to %s' %
- (this_error_count, known_error))
+ print('====> %d failures due to %s' % (this_error_count,
+ known_error))
return error_list
@@ -116,8 +116,9 @@
def _get_last_processed_buildnumber(build_name):
- query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
- _PROJECT_ID, _DATASET_ID, build_name)
+ query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (_PROJECT_ID,
+ _DATASET_ID,
+ build_name)
query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
@@ -167,8 +168,8 @@
html = urllib.urlopen(console_url).read()
build_result['pass_count'] = 0
build_result['failure_count'] = 1
- # In this case, the string doesn't exist in the result html but the fact
- # that we fail to parse the result html indicates Jenkins failure and hence
+ # In this case, the string doesn't exist in the result html but the fact
+ # that we fail to parse the result html indicates Jenkins failure and hence
# no report files were generated.
build_result['no_report_files_found'] = True
error_list = _scrape_for_known_errors(html)
@@ -223,7 +224,7 @@
if build.get_status() == 'ABORTED':
continue
# If any build is still running, stop processing this job. Next time, we
- # start from where it was left so that all builds are processed
+ # start from where it was left so that all builds are processed
# sequentially.
if build.is_running():
print('====> Build %d is still running.' % build_number)
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 99f4298..44a6ec2 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -685,7 +685,8 @@
cmdargs = [
'--server_host=%s' % server_host_detail[0],
'--server_host_override=%s' % server_host_detail[1],
- '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+ '--server_port=443', '--use_tls=true',
+ '--test_case=%s' % test_case
]
environ = dict(language.cloud_to_prod_env(), **language.global_env())
if auth:
@@ -696,18 +697,19 @@
cwd = language.client_cwd
if docker_image:
- container_name = dockerjob.random_name('interop_client_%s' %
- language.safename)
+ container_name = dockerjob.random_name(
+ 'interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=cwd,
environ=environ,
- docker_args=['--net=host', '--name=%s' % container_name])
+ docker_args=['--net=host',
+ '--name=%s' % container_name])
if manual_cmd_log is not None:
if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
- docker_image)
+ manual_cmd_log.append(
+ 'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
environ = None
@@ -775,18 +777,19 @@
environ = language.global_env()
if docker_image and language.safename != 'objc':
# we can't run client in docker for objc.
- container_name = dockerjob.random_name('interop_client_%s' %
- language.safename)
+ container_name = dockerjob.random_name(
+ 'interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
- docker_args=['--net=host', '--name=%s' % container_name])
+ docker_args=['--net=host',
+ '--name=%s' % container_name])
if manual_cmd_log is not None:
if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
- docker_image)
+ manual_cmd_log.append(
+ 'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
cwd = None
@@ -807,12 +810,12 @@
def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
"""Create jobspec for running a server"""
- container_name = dockerjob.random_name('interop_server_%s' %
- language.safename)
+ container_name = dockerjob.random_name(
+ 'interop_server_%s' % language.safename)
cmdline = bash_cmdline(
language.server_cmd([
- '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
- 'false' if insecure else 'true')
+ '--port=%s' % _DEFAULT_SERVER_PORT,
+ '--use_tls=%s' % ('false' if insecure else 'true')
]))
environ = language.global_env()
docker_args = ['--name=%s' % container_name]
@@ -821,9 +824,9 @@
# with the server port. These ports are used for http2 interop test
# (one test case per port).
docker_args += list(
- itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
- for i in range(
- len(_HTTP2_SERVER_TEST_CASES))))
+ itertools.chain.from_iterable(
+ ('-p', str(_DEFAULT_SERVER_PORT + i))
+ for i in range(len(_HTTP2_SERVER_TEST_CASES))))
# Enable docker's healthcheck mechanism.
# This runs a Python script inside the container every second. The script
# pings the http2 server to verify it is ready. The 'health-retries' flag
@@ -834,8 +837,8 @@
# command line.
docker_args += [
'--health-cmd=python test/http2_test/http2_server_health_check.py '
- '--server_host=%s --server_port=%d' %
- ('localhost', _DEFAULT_SERVER_PORT),
+ '--server_host=%s --server_port=%d' % ('localhost',
+ _DEFAULT_SERVER_PORT),
'--health-interval=1s',
'--health-retries=5',
'--health-timeout=10s',
@@ -852,8 +855,8 @@
docker_args=docker_args)
if manual_cmd_log is not None:
if manual_cmd_log == []:
- manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
- docker_image)
+ manual_cmd_log.append(
+ 'echo "Testing ${docker_image:=%s}"' % docker_image)
manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
@@ -974,7 +977,8 @@
'--override_server',
action='append',
type=lambda kv: kv.split('='),
- help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+ help=
+ 'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
default=[])
argp.add_argument(
'-t', '--travis', default=False, action='store_const', const=True)
@@ -993,7 +997,8 @@
default=False,
action='store_const',
const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+ help=
+ 'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--manual_run',
@@ -1014,7 +1019,8 @@
default=False,
action='store_const',
const=True,
- help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests'
+ help=
+ 'Enable HTTP/2 server edge case testing. (Includes positive and negative tests'
)
argp.add_argument(
'--insecure',
@@ -1039,8 +1045,8 @@
servers = set(
s
- for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
- for x in args.server))
+ for s in itertools.chain.from_iterable(
+ _SERVERS if x == 'all' else [x] for x in args.server))
if args.use_docker:
if not args.travis:
@@ -1067,10 +1073,9 @@
# we want to include everything but objc in 'all'
# because objc won't run on non-mac platforms
all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
-languages = set(
- _LANGUAGES[l]
- for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x]
- for x in args.language))
+languages = set(_LANGUAGES[l]
+ for l in itertools.chain.from_iterable(
+ all_but_objc if x == 'all' else [x] for x in args.language))
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 561217c..4e4d05c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -92,8 +92,9 @@
benchmarks.append(
jobset.JobSpec(
[
- 'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
- line, '--benchmark_min_time=0.05'
+ 'bins/basicprof/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line,
+ '--benchmark_min_time=0.05'
],
environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
shortname='profile-%s' % fnize(line)))
@@ -102,8 +103,9 @@
[
sys.executable,
'tools/profiling/latency_profile/profile_analyzer.py',
- '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
- '--out', 'reports/%s.txt' % fnize(line)
+ '--source',
+ '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
+ 'reports/%s.txt' % fnize(line)
],
timeout_seconds=20 * 60,
shortname='analyze-%s' % fnize(line)))
@@ -116,7 +118,8 @@
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(
- benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+ benchmarks, maxjobs=max(1,
+ multiprocessing.cpu_count() / 2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
@@ -145,8 +148,9 @@
benchmarks.append(
jobset.JobSpec(
[
- 'perf', 'record', '-o', '%s-perf.data' % fnize(
- line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
+ 'perf', 'record', '-o',
+ '%s-perf.data' % fnize(line), '-g', '-F', '997',
+ 'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
],
shortname='perf-%s' % fnize(line)))
@@ -183,12 +187,14 @@
def run_summary(bm_name, cfg, base_json_name):
subprocess.check_call([
- 'make', bm_name, 'CONFIG=%s' % cfg, '-j',
+ 'make', bm_name,
+ 'CONFIG=%s' % cfg, '-j',
'%d' % multiprocessing.cpu_count()
])
cmd = [
- 'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
- (base_json_name, cfg), '--benchmark_out_format=json'
+ 'bins/%s/%s' % (cfg, bm_name),
+ '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
+ '--benchmark_out_format=json'
]
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
@@ -205,10 +211,12 @@
f.write(
subprocess.check_output([
'tools/profiling/microbenchmarks/bm2bq.py',
- '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
+ '%s.counters.json' % bm_name,
+ '%s.opt.json' % bm_name
]))
subprocess.check_call([
- 'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
+ 'bq', 'load', 'microbenchmarks.microbenchmarks',
+ '%s.csv' % bm_name
])
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 03b684b..9a9f74e 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -100,7 +100,8 @@
jobspec = jobset.JobSpec(
cmdline=cmdline,
shortname=shortname,
- timeout_seconds=worker_timeout, # workers get restarted after each scenario
+ timeout_seconds=
+ worker_timeout, # workers get restarted after each scenario
verbose_success=True)
return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
@@ -298,7 +299,8 @@
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0],
perf_cmd=perf_cmd)
- for language in languages for worker_idx, worker in enumerate(workers)
+ for language in languages
+ for worker_idx, worker in enumerate(workers)
]
@@ -367,10 +369,10 @@
workers = workers_by_lang[str(language)][:]
# 'SERVER_LANGUAGE' is an indicator for this script to pick
# a server in different language.
- custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
- None)
- custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
- None)
+ custom_server_lang = scenario_json.get(
+ 'SERVER_LANGUAGE', None)
+ custom_client_lang = scenario_json.get(
+ 'CLIENT_LANGUAGE', None)
scenario_json = scenario_config.remove_nonproto_fields(
scenario_json)
if custom_server_lang and custom_client_lang:
@@ -480,8 +482,8 @@
argp.add_argument(
'--remote_driver_host',
default=None,
- help='Run QPS driver on given host. By default, QPS driver is run locally.'
- )
+ help=
+ 'Run QPS driver on given host. By default, QPS driver is run locally.')
argp.add_argument(
'--remote_worker_host',
nargs='+',
@@ -560,7 +562,8 @@
'--flame_graph_reports',
default='perf_reports',
type=str,
- help='Name of directory to output flame graph profiles to, if any are created.'
+ help=
+ 'Name of directory to output flame graph profiles to, if any are created.'
)
argp.add_argument(
'-u',
@@ -662,15 +665,16 @@
six.iteritems(resultset)))
finally:
# Consider qps workers that need to be killed as failures
- qps_workers_killed += finish_qps_workers(scenario.workers,
- qpsworker_jobs)
+ qps_workers_killed += finish_qps_workers(
+ scenario.workers, qpsworker_jobs)
if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
workers_and_base_names = {}
for worker in scenario.workers:
if not worker.perf_file_base_name:
raise Exception(
- 'using perf buf perf report filename is unspecified')
+ 'using perf buf perf report filename is unspecified'
+ )
workers_and_base_names[
worker.host_and_port] = worker.perf_file_base_name
perf_report_failures += run_collect_perf_profile_jobs(
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 3aa9eb8..c8e917f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -182,15 +182,15 @@
js = json.load(f)
return [
tgt for tgt in js
- if tgt['language'] == test_lang and platform_string() in tgt[
- platforms_str] and not (travis and tgt['flaky'])
+ if tgt['language'] == test_lang and platform_string() in
+ tgt[platforms_str] and not (travis and tgt['flaky'])
]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
- raise Exception('Compiler %s not supported (on this platform).' %
- compiler)
+ raise Exception(
+ 'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
@@ -263,9 +263,9 @@
self.config = config
self.args = args
if self.platform == 'windows':
- _check_compiler(self.args.compiler, [
- 'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
- ])
+ _check_compiler(
+ self.args.compiler,
+ ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
@@ -305,9 +305,9 @@
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
- polling_strategies = (
- _POLLING_STRATEGIES.get(self.platform, ['all'])
- if target.get('uses_polling', True) else ['none'])
+ polling_strategies = (_POLLING_STRATEGIES.get(
+ self.platform, ['all']) if target.get('uses_polling', True) else
+ ['none'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
@@ -377,7 +377,8 @@
for line in tests.split('\n'):
test = line.strip()
if not test: continue
- cmdline = [binary, '--benchmark_filter=%s$' % test
+ cmdline = [binary,
+ '--benchmark_filter=%s$' % test
] + target['args']
out.append(
self.config.job_spec(
@@ -408,7 +409,8 @@
assert base is not None
assert line[1] == ' '
test = base + line.strip()
- cmdline = [binary, '--gtest_filter=%s' % test
+ cmdline = [binary,
+ '--gtest_filter=%s' % test
] + target['args']
out.append(
self.config.job_spec(
@@ -445,8 +447,8 @@
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
- 'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
- 'check_epollexclusive'
+ 'buildtests_%s' % self.make_target,
+ 'tools_%s' % self.make_target, 'check_epollexclusive'
]
def make_options(self):
@@ -480,14 +482,18 @@
def _clang_make_options(self, version_suffix=''):
return [
- 'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
- 'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
+ 'CC=clang%s' % version_suffix,
+ 'CXX=clang++%s' % version_suffix,
+ 'LD=clang%s' % version_suffix,
+ 'LDXX=clang++%s' % version_suffix
]
def _gcc_make_options(self, version_suffix):
return [
- 'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
- 'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
+ 'CC=gcc%s' % version_suffix,
+ 'CXX=g++%s' % version_suffix,
+ 'LD=gcc%s' % version_suffix,
+ 'LDXX=g++%s' % version_suffix
]
def _compiler_options(self, use_docker, compiler):
@@ -700,8 +706,8 @@
environ=dict(
list(environment.items()) + [(
'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
- shortname='%s.test.%s' % (config.name, suite_name),)
- for suite_name in tests_json for config in self.pythons
+ shortname='%s.test.%s' % (config.name, suite_name),
+ ) for suite_name in tests_json for config in self.pythons
]
def pre_build_steps(self):
@@ -801,7 +807,10 @@
if os.name == 'nt':
return (python35_config,)
else:
- return (python27_config, python34_config,)
+ return (
+ python27_config,
+ python34_config,
+ )
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
@@ -817,8 +826,12 @@
elif args.compiler == 'python_alpine':
return (python27_config,)
elif args.compiler == 'all_the_cpythons':
- return (python27_config, python34_config, python35_config,
- python36_config,)
+ return (
+ python27_config,
+ python34_config,
+ python35_config,
+ python36_config,
+ )
else:
raise Exception('Compiler %s not supported.' % args.compiler)
@@ -921,13 +934,15 @@
specs = []
for assembly in six.iterkeys(tests_by_assembly):
- assembly_file = 'src/csharp/%s/%s/%s%s' % (
- assembly, assembly_subdir, assembly, assembly_extension)
+ assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
+ assembly_subdir,
+ assembly,
+ assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
- cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
- ] + nunit_args
+ cmdline = runtime_cmd + [assembly_file,
+ '--test=%s' % test] + nunit_args
specs.append(
self.config.job_spec(
cmdline,
@@ -1147,8 +1162,8 @@
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
- _CONFIGS = dict((cfg['config'], Config(**cfg))
- for cfg in ast.literal_eval(f.read()))
+ _CONFIGS = dict(
+ (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
@@ -1298,13 +1313,15 @@
default=False,
action='store_const',
const=True,
- help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+ help=
+ 'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--arch',
choices=['default', 'x86', 'x64'],
default='default',
- help='Selects architecture to target. For some platforms "default" is the only supported choice.'
+ help=
+ 'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
'--compiler',
@@ -1316,7 +1333,8 @@
'cmake_vs2015', 'cmake_vs2017'
],
default='default',
- help='Selects compiler to use. Allowed values depend on the platform and language.'
+ help=
+ 'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
'--iomgr_platform',
@@ -1339,7 +1357,8 @@
'--update_submodules',
default=[],
nargs='*',
- help='Update some submodules before building. If any are updated, also run generate_projects. '
+ help=
+ 'Update some submodules before building. If any are updated, also run generate_projects. '
+
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
@@ -1360,7 +1379,8 @@
default=False,
action='store_const',
const=True,
- help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ help=
+ 'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
'--force_default_poller',
@@ -1399,8 +1419,8 @@
if test.flaky: flaky_tests.add(test.name)
if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
except:
- print("Unexpected error getting flaky tests: %s" %
- traceback.format_exc())
+ print(
+ "Unexpected error getting flaky tests: %s" % traceback.format_exc())
if args.force_default_poller:
_POLLING_STRATEGIES = {}
@@ -1473,7 +1493,8 @@
language_make_options = list(
set([
make_option
- for lang in languages for make_option in lang.make_options()
+ for lang in languages
+ for make_option in lang.make_options()
]))
if args.use_docker:
@@ -1530,8 +1551,8 @@
return [
jobset.JobSpec(
[
- 'cmake', '--build', '.', '--target', '%s' % target,
- '--config', _MSBUILD_CONFIG[cfg]
+ 'cmake', '--build', '.', '--target',
+ '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets
@@ -1541,8 +1562,8 @@
# With cmake, we've passed all the build configuration in the pre-build step already
return [
jobset.JobSpec(
- [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
- targets,
+ [os.getenv('MAKE', 'make'), '-j',
+ '%d' % args.jobs] + targets,
cwd='cmake/build',
timeout_seconds=None)
]
@@ -1550,10 +1571,11 @@
return [
jobset.JobSpec(
[
- os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
- args.jobs,
+ os.getenv('MAKE', 'make'), '-f', makefile, '-j',
+ '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
- args.slowdown, 'CONFIG=%s' % cfg, 'Q='
+ args.slowdown,
+ 'CONFIG=%s' % cfg, 'Q='
] + language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
timeout_seconds=None)
@@ -1565,8 +1587,8 @@
make_targets = {}
for l in languages:
makefile = l.makefile_name()
- make_targets[makefile] = make_targets.get(
- makefile, set()).union(set(l.make_targets()))
+ make_targets[makefile] = make_targets.get(makefile, set()).union(
+ set(l.make_targets()))
def build_step_environ(cfg):
@@ -1581,7 +1603,8 @@
set(
jobset.JobSpec(
cmdline, environ=build_step_environ(build_config), flake_retries=2)
- for l in languages for cmdline in l.pre_build_steps()))
+ for l in languages
+ for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(
make_jobspec(build_config, list(targets), makefile)
@@ -1593,12 +1616,14 @@
cmdline,
environ=build_step_environ(build_config),
timeout_seconds=None)
- for l in languages for cmdline in l.build_steps()))
+ for l in languages
+ for cmdline in l.build_steps()))
post_tests_steps = list(
set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
- for l in languages for cmdline in l.post_tests_steps()))
+ for l in languages
+ for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
@@ -1612,8 +1637,8 @@
except:
pass
else:
- urllib.request.urlopen('http://localhost:%d/quitquitquit' %
- legacy_server_port).read()
+ urllib.request.urlopen(
+ 'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
@@ -1679,8 +1704,8 @@
return []
if not args.travis and not _has_epollexclusive() and platform_string(
- ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
- platform_string()]:
+ ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
+ )]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
@@ -1694,12 +1719,11 @@
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
- one_run = set(spec
- for language in languages
- for spec in language.test_specs()
- if (re.search(args.regex, spec.shortname) and (
- args.regex_exclude == '' or not re.search(
- args.regex_exclude, spec.shortname))))
+ one_run = set(
+ spec for language in languages for spec in language.test_specs()
+ if (re.search(args.regex, spec.shortname) and
+ (args.regex_exclude == '' or
+ not re.search(args.regex_exclude, spec.shortname))))
# When running on travis, we want out test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
@@ -1722,8 +1746,9 @@
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
- runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
- else itertools.repeat(massaged_one_run, runs_per_test))
+ runs_sequence = (itertools.repeat(massaged_one_run)
+ if infinite_runs else itertools.repeat(
+ massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
@@ -1750,8 +1775,8 @@
else:
jobset.message(
'FLAKE',
- '%s [%d/%d runs flaked]' %
- (k, num_failures, num_runs),
+ '%s [%d/%d runs flaked]' % (k, num_failures,
+ num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 49be8f1..ac90bef 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -65,8 +65,10 @@
test_job = jobset.JobSpec(
cmdline=[
'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
- '-j', str(inner_jobs), '-x', _report_filename(name),
- '--report_suite_name', '%s' % name
+ '-j',
+ str(inner_jobs), '-x',
+ _report_filename(name), '--report_suite_name',
+ '%s' % name
] + runtests_args,
environ=runtests_envs,
shortname='run_tests_%s' % name,
@@ -90,8 +92,10 @@
test_job = jobset.JobSpec(
cmdline=[
'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
- '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
- '--report_suite_name', '%s' % name
+ '-t', '-j',
+ str(inner_jobs), '-x',
+ '../%s' % _report_filename(name), '--report_suite_name',
+ '%s' % name
] + runtests_args,
environ=env,
shortname='run_tests_%s' % name,
@@ -492,8 +496,8 @@
jobs = []
for job in all_jobs:
- if not args.filter or all(filter in job.labels
- for filter in args.filter):
+ if not args.filter or all(
+ filter in job.labels for filter in args.filter):
if not any(exclude_label in job.labels
for exclude_label in args.exclude):
jobs.append(job)
diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py
index c2a6399..ebbb1a9 100755
--- a/tools/run_tests/sanity/check_test_filtering.py
+++ b/tools/run_tests/sanity/check_test_filtering.py
@@ -80,7 +80,8 @@
if (label in job.labels):
jobs_matching_labels += 1
self.assertEquals(
- len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
+ len(filtered_jobs),
+ len(all_jobs) - jobs_matching_labels)
def test_individual_language_filters(self):
# Changing unlisted file should trigger all languages