Merge pull request #13771 from yang-g/delete

Mark tcp errors as UNAVAILABLE
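
The CMakeLists.txt portion of the patch below replaces every hard-coded
BoringSSL include path with a new _gRPC_SSL_INCLUDE_DIR variable that is set
once according to gRPC_SSL_PROVIDER. A minimal sketch of that pattern, drawn
from the hunks below (the target name example_target is hypothetical, for
illustration only):

    # Pick the SSL include directory once, based on the provider,
    # instead of hard-coding BoringSSL's path into every target.
    if("${gRPC_SSL_PROVIDER}" STREQUAL "module")
      add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl)
      if(TARGET ssl)
        set(_gRPC_SSL_LIBRARIES ssl)
        set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
      endif()
    elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
      find_package(OpenSSL REQUIRED)
      set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
      set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
    endif()

    # Targets then consume the variable; "package" builds no longer pull in
    # BoringSSL headers, and nothing is added to the global include path.
    target_include_directories(example_target PRIVATE ${_gRPC_SSL_INCLUDE_DIR})

This also drops the global include_directories(${OPENSSL_INCLUDE_DIR}) call in
favor of per-target PRIVATE includes.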
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 094e43e..cb32281 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -4,3 +4,4 @@
 /**/OWNERS @markdroth @nicolasnoble @ctiller
 /bazel/** @nicolasnoble @dgquintas @ctiller
 /src/core/ext/filters/client_channel/** @markdroth @dgquintas @ctiller
+/tools/run_tests/performance/** @ncteisen @matt-kwong @ctiller
diff --git a/BUILD b/BUILD
index 1384319..dba6592 100644
--- a/BUILD
+++ b/BUILD
@@ -44,11 +44,11 @@
 )
 
 # This should be updated along with build.yaml
-g_stands_for = "generous"
+g_stands_for = "glossy"
 
 core_version = "5.0.0-dev"
 
-version = "1.8.0-dev"
+version = "1.9.0-dev"
 
 GPR_PUBLIC_HDRS = [
     "include/grpc/support/alloc.h",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 22a1ab7..49eb38f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -224,6 +224,7 @@
     add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl)
     if(TARGET ssl)
       set(_gRPC_SSL_LIBRARIES ssl)
+      set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
     endif()
   else()
       message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong")
@@ -235,7 +236,7 @@
 elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
   find_package(OpenSSL REQUIRED)
   set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
-  include_directories(${OPENSSL_INCLUDE_DIR})
+  set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
   set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n  find_package(OpenSSL)\nendif()")
 endif()
 
@@ -358,12 +359,12 @@
 add_custom_target(tools_c
   DEPENDS
   check_epollexclusive
-  gen_hpack_tables
-  gen_legal_metadata_characters
-  gen_percent_encoding_tables
   grpc_create_jwt
   grpc_print_google_default_creds_token
   grpc_verify_jwt
+  gen_hpack_tables
+  gen_legal_metadata_characters
+  gen_percent_encoding_tables
 )
 
 add_custom_target(tools_cxx
@@ -847,7 +848,7 @@
 target_include_directories(gpr
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -939,7 +940,7 @@
 target_include_directories(gpr_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1226,7 +1227,7 @@
 target_include_directories(grpc
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1540,7 +1541,7 @@
 target_include_directories(grpc_cronet
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1828,7 +1829,7 @@
 target_include_directories(grpc_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2098,7 +2099,7 @@
 target_include_directories(grpc_test_util_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2386,7 +2387,7 @@
 target_include_directories(grpc_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2478,7 +2479,7 @@
 target_include_directories(reconnect_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2520,7 +2521,7 @@
 target_include_directories(test_tcp_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2601,7 +2602,7 @@
 target_include_directories(grpc++
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2804,7 +2805,7 @@
 target_include_directories(grpc++_core_stats
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3086,7 +3087,7 @@
 target_include_directories(grpc++_cronet
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3288,7 +3289,7 @@
 target_include_directories(grpc++_error_details
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3353,7 +3354,7 @@
 target_include_directories(grpc++_proto_reflection_desc_db
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3414,7 +3415,7 @@
 target_include_directories(grpc++_reflection
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3472,7 +3473,7 @@
 target_include_directories(grpc++_test_config
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3550,7 +3551,7 @@
 target_include_directories(grpc++_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3691,7 +3692,7 @@
 target_include_directories(grpc++_test_util_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3834,7 +3835,7 @@
 target_include_directories(grpc++_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4027,7 +4028,7 @@
 target_include_directories(grpc_benchmark
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4086,7 +4087,7 @@
 target_include_directories(grpc_cli_libs
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4146,7 +4147,7 @@
 target_include_directories(grpc_plugin_support
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4224,7 +4225,7 @@
 target_include_directories(http2_client_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4279,7 +4280,7 @@
 target_include_directories(interop_client_helper
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4349,7 +4350,7 @@
 target_include_directories(interop_client_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4400,7 +4401,7 @@
 target_include_directories(interop_server_helper
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4469,7 +4470,7 @@
 target_include_directories(interop_server_lib
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4520,7 +4521,7 @@
 target_include_directories(interop_server_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4608,7 +4609,7 @@
 target_include_directories(qps
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4655,7 +4656,7 @@
 target_include_directories(grpc_csharp_ext
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4683,92 +4684,6 @@
 
 if (gRPC_BUILD_TESTS)
 
-add_library(ares
-  third_party/cares/cares/ares__close_sockets.c
-  third_party/cares/cares/ares__get_hostent.c
-  third_party/cares/cares/ares__read_line.c
-  third_party/cares/cares/ares__timeval.c
-  third_party/cares/cares/ares_cancel.c
-  third_party/cares/cares/ares_create_query.c
-  third_party/cares/cares/ares_data.c
-  third_party/cares/cares/ares_destroy.c
-  third_party/cares/cares/ares_expand_name.c
-  third_party/cares/cares/ares_expand_string.c
-  third_party/cares/cares/ares_fds.c
-  third_party/cares/cares/ares_free_hostent.c
-  third_party/cares/cares/ares_free_string.c
-  third_party/cares/cares/ares_getenv.c
-  third_party/cares/cares/ares_gethostbyaddr.c
-  third_party/cares/cares/ares_gethostbyname.c
-  third_party/cares/cares/ares_getnameinfo.c
-  third_party/cares/cares/ares_getopt.c
-  third_party/cares/cares/ares_getsock.c
-  third_party/cares/cares/ares_init.c
-  third_party/cares/cares/ares_library_init.c
-  third_party/cares/cares/ares_llist.c
-  third_party/cares/cares/ares_mkquery.c
-  third_party/cares/cares/ares_nowarn.c
-  third_party/cares/cares/ares_options.c
-  third_party/cares/cares/ares_parse_a_reply.c
-  third_party/cares/cares/ares_parse_aaaa_reply.c
-  third_party/cares/cares/ares_parse_mx_reply.c
-  third_party/cares/cares/ares_parse_naptr_reply.c
-  third_party/cares/cares/ares_parse_ns_reply.c
-  third_party/cares/cares/ares_parse_ptr_reply.c
-  third_party/cares/cares/ares_parse_soa_reply.c
-  third_party/cares/cares/ares_parse_srv_reply.c
-  third_party/cares/cares/ares_parse_txt_reply.c
-  third_party/cares/cares/ares_platform.c
-  third_party/cares/cares/ares_process.c
-  third_party/cares/cares/ares_query.c
-  third_party/cares/cares/ares_search.c
-  third_party/cares/cares/ares_send.c
-  third_party/cares/cares/ares_strcasecmp.c
-  third_party/cares/cares/ares_strdup.c
-  third_party/cares/cares/ares_strerror.c
-  third_party/cares/cares/ares_timeout.c
-  third_party/cares/cares/ares_version.c
-  third_party/cares/cares/ares_writev.c
-  third_party/cares/cares/bitncmp.c
-  third_party/cares/cares/inet_net_pton.c
-  third_party/cares/cares/inet_ntop.c
-  third_party/cares/cares/windows_port.c
-)
-
-if(WIN32 AND MSVC)
-  set_target_properties(ares PROPERTIES COMPILE_PDB_NAME "ares"
-    COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}"
-  )
-  if (gRPC_INSTALL)
-    install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ares.pdb
-      DESTINATION ${gRPC_INSTALL_LIBDIR} OPTIONAL
-    )
-  endif()
-endif()
-
-
-target_include_directories(ares
-  PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${ZLIB_INCLUDE_DIR}
-  PRIVATE ${BENCHMARK}/include
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(ares
-  ${_gRPC_SSL_LIBRARIES}
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-endif (gRPC_BUILD_TESTS)
-if (gRPC_BUILD_TESTS)
-
 add_library(bad_client_test
   test/core/bad_client/bad_client.cc
 )
@@ -4788,7 +4703,7 @@
 target_include_directories(bad_client_test
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4829,7 +4744,7 @@
 target_include_directories(bad_ssl_test_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4930,7 +4845,7 @@
 target_include_directories(end2end_tests
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -5031,7 +4946,7 @@
 target_include_directories(end2end_nosec_tests
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -5062,7 +4977,7 @@
 target_include_directories(alarm_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5091,7 +5006,7 @@
 target_include_directories(algorithm_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5120,7 +5035,7 @@
 target_include_directories(alloc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5147,7 +5062,7 @@
 target_include_directories(alpn_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5176,7 +5091,7 @@
 target_include_directories(arena_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5203,7 +5118,7 @@
 target_include_directories(backoff_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5232,7 +5147,7 @@
 target_include_directories(bad_server_response_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5262,7 +5177,7 @@
 target_include_directories(bin_decoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5289,7 +5204,7 @@
 target_include_directories(bin_encoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5316,7 +5231,7 @@
 target_include_directories(byte_stream_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5345,7 +5260,7 @@
 target_include_directories(channel_create_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5373,7 +5288,7 @@
 target_include_directories(check_epollexclusive
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5408,7 +5323,7 @@
 target_include_directories(chttp2_hpack_encoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5437,7 +5352,7 @@
 target_include_directories(chttp2_stream_map_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5466,7 +5381,7 @@
 target_include_directories(chttp2_varint_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5495,7 +5410,7 @@
 target_include_directories(combiner_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5524,7 +5439,7 @@
 target_include_directories(compression_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5553,7 +5468,7 @@
 target_include_directories(concurrent_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5582,7 +5497,7 @@
 target_include_directories(connection_refused_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5611,7 +5526,7 @@
 target_include_directories(dns_resolver_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5640,7 +5555,7 @@
 target_include_directories(dns_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5670,7 +5585,7 @@
 target_include_directories(dualstack_socket_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5700,7 +5615,7 @@
 target_include_directories(endpoint_pair_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5729,7 +5644,7 @@
 target_include_directories(error_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5759,7 +5674,7 @@
 target_include_directories(ev_epollsig_linux_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5789,7 +5704,7 @@
 target_include_directories(fake_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5820,7 +5735,7 @@
 target_include_directories(fake_transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5850,7 +5765,7 @@
 target_include_directories(fd_conservation_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5881,7 +5796,7 @@
 target_include_directories(fd_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5911,7 +5826,7 @@
 target_include_directories(fling_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5940,7 +5855,7 @@
 target_include_directories(fling_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5970,7 +5885,7 @@
 target_include_directories(fling_stream_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6001,7 +5916,7 @@
 target_include_directories(fling_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6021,104 +5936,6 @@
 
 endif()
 endif (gRPC_BUILD_TESTS)
-
-add_executable(gen_hpack_tables
-  tools/codegen/core/gen_hpack_tables.c
-)
-
-
-target_include_directories(gen_hpack_tables
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_hpack_tables
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-  gpr
-  grpc
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_hpack_tables EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
-
-add_executable(gen_legal_metadata_characters
-  tools/codegen/core/gen_legal_metadata_characters.c
-)
-
-
-target_include_directories(gen_legal_metadata_characters
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_legal_metadata_characters
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_legal_metadata_characters EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
-
-add_executable(gen_percent_encoding_tables
-  tools/codegen/core/gen_percent_encoding_tables.c
-)
-
-
-target_include_directories(gen_percent_encoding_tables
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_percent_encoding_tables
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_percent_encoding_tables EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
 if (gRPC_BUILD_TESTS)
 if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
 
@@ -6130,7 +5947,7 @@
 target_include_directories(goaway_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6160,7 +5977,7 @@
 target_include_directories(gpr_avl_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6187,7 +6004,7 @@
 target_include_directories(gpr_cmdline_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6214,7 +6031,7 @@
 target_include_directories(gpr_cpu_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6241,7 +6058,7 @@
 target_include_directories(gpr_env_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6268,7 +6085,7 @@
 target_include_directories(gpr_host_port_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6295,7 +6112,7 @@
 target_include_directories(gpr_log_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6322,7 +6139,7 @@
 target_include_directories(gpr_manual_constructor_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6349,7 +6166,7 @@
 target_include_directories(gpr_mpscq_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6376,7 +6193,7 @@
 target_include_directories(gpr_spinlock_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6403,7 +6220,7 @@
 target_include_directories(gpr_string_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6430,7 +6247,7 @@
 target_include_directories(gpr_sync_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6457,7 +6274,7 @@
 target_include_directories(gpr_thd_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6484,7 +6301,7 @@
 target_include_directories(gpr_time_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6511,7 +6328,7 @@
 target_include_directories(gpr_tls_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6538,7 +6355,7 @@
 target_include_directories(gpr_useful_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6565,7 +6382,7 @@
 target_include_directories(grpc_auth_context_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6594,7 +6411,7 @@
 target_include_directories(grpc_b64_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6623,7 +6440,7 @@
 target_include_directories(grpc_byte_buffer_reader_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6652,7 +6469,7 @@
 target_include_directories(grpc_channel_args_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6681,7 +6498,7 @@
 target_include_directories(grpc_channel_stack_builder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6710,7 +6527,7 @@
 target_include_directories(grpc_channel_stack_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6739,7 +6556,7 @@
 target_include_directories(grpc_completion_queue_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6768,7 +6585,7 @@
 target_include_directories(grpc_completion_queue_threading_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6796,7 +6613,7 @@
 target_include_directories(grpc_create_jwt
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6832,7 +6649,7 @@
 target_include_directories(grpc_credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6861,7 +6678,7 @@
 target_include_directories(grpc_fetch_oauth2
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6890,7 +6707,7 @@
 target_include_directories(grpc_invalid_channel_args_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6920,7 +6737,7 @@
 target_include_directories(grpc_json_token_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6950,7 +6767,7 @@
 target_include_directories(grpc_jwt_verifier_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6978,7 +6795,7 @@
 target_include_directories(grpc_print_google_default_creds_token
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7013,7 +6830,7 @@
 target_include_directories(grpc_security_connector_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7042,7 +6859,7 @@
 target_include_directories(grpc_ssl_credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7070,7 +6887,7 @@
 target_include_directories(grpc_verify_jwt
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7106,7 +6923,7 @@
 target_include_directories(handshake_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7139,7 +6956,7 @@
 target_include_directories(handshake_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7172,7 +6989,7 @@
 target_include_directories(handshake_server_with_readahead_handshaker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7203,7 +7020,7 @@
 target_include_directories(histogram_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7230,7 +7047,7 @@
 target_include_directories(hpack_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7259,7 +7076,7 @@
 target_include_directories(hpack_table_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7288,7 +7105,7 @@
 target_include_directories(http_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7317,7 +7134,7 @@
 target_include_directories(httpcli_format_request_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7347,7 +7164,7 @@
 target_include_directories(httpcli_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7378,7 +7195,7 @@
 target_include_directories(httpscli_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7408,7 +7225,7 @@
 target_include_directories(init_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7437,7 +7254,7 @@
 target_include_directories(invalid_call_argument_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7466,7 +7283,7 @@
 target_include_directories(json_rewrite
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7493,7 +7310,7 @@
 target_include_directories(json_rewrite_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7522,7 +7339,7 @@
 target_include_directories(json_stream_error_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7551,7 +7368,7 @@
 target_include_directories(json_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7580,7 +7397,7 @@
 target_include_directories(lame_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7609,7 +7426,7 @@
 target_include_directories(lb_policies_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7638,7 +7455,7 @@
 target_include_directories(load_file_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7667,7 +7484,7 @@
 target_include_directories(memory_profile_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7696,7 +7513,7 @@
 target_include_directories(memory_profile_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7726,7 +7543,7 @@
 target_include_directories(memory_profile_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7756,7 +7573,7 @@
 target_include_directories(message_compress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7785,7 +7602,7 @@
 target_include_directories(minimal_stack_is_minimal_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7814,7 +7631,7 @@
 target_include_directories(multiple_server_queues_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7843,7 +7660,7 @@
 target_include_directories(murmur_hash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7870,7 +7687,7 @@
 target_include_directories(no_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7899,7 +7716,7 @@
 target_include_directories(num_external_connectivity_watchers_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7928,7 +7745,7 @@
 target_include_directories(parse_address_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7957,7 +7774,7 @@
 target_include_directories(percent_encoding_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7987,7 +7804,7 @@
 target_include_directories(pollset_set_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8018,7 +7835,7 @@
 target_include_directories(resolve_address_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8048,7 +7865,7 @@
 target_include_directories(resolve_address_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8077,7 +7894,7 @@
 target_include_directories(resource_quota_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8106,7 +7923,7 @@
 target_include_directories(secure_channel_create_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8135,7 +7952,7 @@
 target_include_directories(secure_endpoint_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8164,7 +7981,7 @@
 target_include_directories(sequential_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8193,7 +8010,7 @@
 target_include_directories(server_chttp2_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8222,7 +8039,7 @@
 target_include_directories(server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8251,7 +8068,7 @@
 target_include_directories(slice_buffer_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8280,7 +8097,7 @@
 target_include_directories(slice_hash_table_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8309,7 +8126,7 @@
 target_include_directories(slice_string_helpers_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8338,7 +8155,7 @@
 target_include_directories(slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8367,7 +8184,7 @@
 target_include_directories(sockaddr_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8396,7 +8213,7 @@
 target_include_directories(sockaddr_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8426,7 +8243,7 @@
 target_include_directories(socket_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8458,7 +8275,7 @@
 target_include_directories(ssl_transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8487,7 +8304,7 @@
 target_include_directories(status_conversion_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8516,7 +8333,7 @@
 target_include_directories(stream_compression_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8545,7 +8362,7 @@
 target_include_directories(stream_owned_slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8575,7 +8392,7 @@
 target_include_directories(tcp_client_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8605,7 +8422,7 @@
 target_include_directories(tcp_client_uv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8635,7 +8452,7 @@
 target_include_directories(tcp_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8666,7 +8483,7 @@
 target_include_directories(tcp_server_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8696,7 +8513,7 @@
 target_include_directories(tcp_server_uv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8725,7 +8542,7 @@
 target_include_directories(time_averaged_stats_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8754,7 +8571,7 @@
 target_include_directories(timeout_encoding_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8783,7 +8600,7 @@
 target_include_directories(timer_heap_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8812,7 +8629,7 @@
 target_include_directories(timer_list_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8841,7 +8658,7 @@
 target_include_directories(transport_connectivity_state_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8870,7 +8687,7 @@
 target_include_directories(transport_metadata_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8900,7 +8717,7 @@
 target_include_directories(transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8931,7 +8748,7 @@
 target_include_directories(udp_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8961,7 +8778,7 @@
 target_include_directories(uri_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8991,7 +8808,7 @@
 target_include_directories(wakeup_fd_cv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9023,7 +8840,7 @@
 target_include_directories(alarm_cpp_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9063,7 +8880,7 @@
 target_include_directories(async_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9103,7 +8920,7 @@
 target_include_directories(auth_property_iterator_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9143,7 +8960,7 @@
 target_include_directories(bdp_estimator_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9184,7 +9001,7 @@
 target_include_directories(bm_arena
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9228,7 +9045,7 @@
 target_include_directories(bm_call_create
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9272,7 +9089,7 @@
 target_include_directories(bm_chttp2_hpack
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9316,7 +9133,7 @@
 target_include_directories(bm_chttp2_transport
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9360,7 +9177,7 @@
 target_include_directories(bm_closure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9404,7 +9221,7 @@
 target_include_directories(bm_cq
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9448,7 +9265,7 @@
 target_include_directories(bm_cq_multiple_threads
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9492,7 +9309,7 @@
 target_include_directories(bm_error
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9536,7 +9353,7 @@
 target_include_directories(bm_fullstack_streaming_ping_pong
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9580,7 +9397,7 @@
 target_include_directories(bm_fullstack_streaming_pump
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9624,7 +9441,7 @@
 target_include_directories(bm_fullstack_trickle
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9669,7 +9486,7 @@
 target_include_directories(bm_fullstack_unary_ping_pong
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9713,7 +9530,7 @@
 target_include_directories(bm_metadata
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9757,7 +9574,7 @@
 target_include_directories(bm_pollset
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9800,7 +9617,7 @@
 target_include_directories(channel_arguments_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9837,7 +9654,7 @@
 target_include_directories(channel_filter_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9874,7 +9691,7 @@
 target_include_directories(chttp2_settings_timeout_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9912,7 +9729,7 @@
 target_include_directories(cli_call_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9960,7 +9777,7 @@
 target_include_directories(client_channel_stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10001,7 +9818,7 @@
 target_include_directories(client_crash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10042,7 +9859,7 @@
 target_include_directories(client_crash_test_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10082,7 +9899,7 @@
 target_include_directories(client_lb_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10157,7 +9974,7 @@
 target_include_directories(codegen_test_full
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10231,7 +10048,7 @@
 target_include_directories(codegen_test_minimal
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10268,7 +10085,7 @@
 target_include_directories(credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10305,7 +10122,7 @@
 target_include_directories(cxx_byte_buffer_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10344,7 +10161,7 @@
 target_include_directories(cxx_slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10383,7 +10200,7 @@
 target_include_directories(cxx_string_ref_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10419,7 +10236,7 @@
 target_include_directories(cxx_time_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10458,7 +10275,7 @@
 target_include_directories(end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10505,7 +10322,7 @@
 target_include_directories(error_details_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10541,7 +10358,7 @@
 target_include_directories(filter_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10581,7 +10398,7 @@
 target_include_directories(generic_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10628,7 +10445,7 @@
 target_include_directories(golden_file_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10665,7 +10482,7 @@
 target_include_directories(grpc_cli
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10702,7 +10519,7 @@
 target_include_directories(grpc_cpp_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10738,7 +10555,7 @@
 target_include_directories(grpc_csharp_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10774,7 +10591,7 @@
 target_include_directories(grpc_node_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10810,7 +10627,7 @@
 target_include_directories(grpc_objective_c_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10846,7 +10663,7 @@
 target_include_directories(grpc_php_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10882,7 +10699,7 @@
 target_include_directories(grpc_python_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10918,7 +10735,7 @@
 target_include_directories(grpc_ruby_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10971,7 +10788,7 @@
 target_include_directories(grpc_tool_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11021,7 +10838,7 @@
 target_include_directories(grpclb_api_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11066,7 +10883,7 @@
 target_include_directories(grpclb_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11113,7 +10930,7 @@
 target_include_directories(grpclb_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11153,7 +10970,7 @@
 target_include_directories(h2_ssl_cert_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11192,7 +11009,7 @@
 target_include_directories(health_service_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11232,7 +11049,7 @@
 target_include_directories(http2_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11273,7 +11090,7 @@
 target_include_directories(hybrid_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11314,7 +11131,7 @@
 target_include_directories(inproc_sync_unary_ping_pong_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11358,7 +11175,7 @@
 target_include_directories(interop_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11402,7 +11219,7 @@
 target_include_directories(interop_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11448,7 +11265,7 @@
 target_include_directories(interop_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11489,7 +11306,7 @@
 target_include_directories(json_run_localhost
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11531,7 +11348,7 @@
 target_include_directories(memory_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11577,7 +11394,7 @@
 target_include_directories(metrics_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11615,7 +11432,7 @@
 target_include_directories(mock_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11655,7 +11472,7 @@
 target_include_directories(noop-benchmark
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11690,7 +11507,7 @@
 target_include_directories(proto_server_reflection_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11732,7 +11549,7 @@
 target_include_directories(proto_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11769,7 +11586,7 @@
 target_include_directories(qps_interarrival_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11812,7 +11629,7 @@
 target_include_directories(qps_json_driver
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11856,7 +11673,7 @@
 target_include_directories(qps_openloop_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11900,7 +11717,7 @@
 target_include_directories(qps_worker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11964,7 +11781,7 @@
 target_include_directories(reconnect_interop_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12026,7 +11843,7 @@
 target_include_directories(reconnect_interop_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12069,7 +11886,7 @@
 target_include_directories(ref_counted_ptr_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12108,7 +11925,7 @@
 target_include_directories(ref_counted_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12147,7 +11964,7 @@
 target_include_directories(secure_auth_context_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12188,7 +12005,7 @@
 target_include_directories(secure_sync_unary_ping_pong_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12232,7 +12049,7 @@
 target_include_directories(server_builder_plugin_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12286,7 +12103,7 @@
 target_include_directories(server_builder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12326,7 +12143,7 @@
 target_include_directories(server_context_test_spouse_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12366,7 +12183,7 @@
 target_include_directories(server_crash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12407,7 +12224,7 @@
 target_include_directories(server_crash_test_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12461,7 +12278,7 @@
 target_include_directories(server_request_call_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12501,7 +12318,7 @@
 target_include_directories(shutdown_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12541,7 +12358,7 @@
 target_include_directories(stats_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12580,7 +12397,7 @@
 target_include_directories(status_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12620,7 +12437,7 @@
 target_include_directories(streaming_throughput_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12692,7 +12509,7 @@
 target_include_directories(stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12733,7 +12550,7 @@
 target_include_directories(thread_manager_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12771,7 +12588,7 @@
 target_include_directories(thread_stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12811,7 +12628,7 @@
 target_include_directories(transport_pid_controller_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12852,7 +12669,7 @@
 target_include_directories(writes_per_rpc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12891,7 +12708,7 @@
 target_include_directories(public_headers_must_be_c89
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12908,6 +12725,104 @@
 )
 
 endif (gRPC_BUILD_TESTS)
+
+add_executable(gen_hpack_tables
+  tools/codegen/core/gen_hpack_tables.cc
+)
+
+
+target_include_directories(gen_hpack_tables
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_hpack_tables
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  gpr
+  grpc
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_hpack_tables EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
+
+add_executable(gen_legal_metadata_characters
+  tools/codegen/core/gen_legal_metadata_characters.cc
+)
+
+
+target_include_directories(gen_legal_metadata_characters
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_legal_metadata_characters
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_legal_metadata_characters EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
+
+add_executable(gen_percent_encoding_tables
+  tools/codegen/core/gen_percent_encoding_tables.cc
+)
+
+
+target_include_directories(gen_percent_encoding_tables
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_percent_encoding_tables
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_percent_encoding_tables EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
 if (gRPC_BUILD_TESTS)
 
 add_executable(badreq_bad_client_test
@@ -12918,7 +12833,7 @@
 target_include_directories(badreq_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12949,7 +12864,7 @@
 target_include_directories(connection_prefix_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12980,7 +12895,7 @@
 target_include_directories(head_of_line_blocking_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13011,7 +12926,7 @@
 target_include_directories(headers_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13042,7 +12957,7 @@
 target_include_directories(initial_settings_frame_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13073,7 +12988,7 @@
 target_include_directories(server_registered_method_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13104,7 +13019,7 @@
 target_include_directories(simple_request_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13135,7 +13050,7 @@
 target_include_directories(unknown_frame_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13166,7 +13081,7 @@
 target_include_directories(window_overflow_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13198,7 +13113,7 @@
 target_include_directories(bad_ssl_cert_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13230,7 +13145,7 @@
 target_include_directories(bad_ssl_cert_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13260,7 +13175,7 @@
 target_include_directories(h2_census_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13290,7 +13205,7 @@
 target_include_directories(h2_compress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13320,7 +13235,7 @@
 target_include_directories(h2_fakesec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13351,7 +13266,7 @@
 target_include_directories(h2_fd_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13382,7 +13297,7 @@
 target_include_directories(h2_full_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13413,7 +13328,7 @@
 target_include_directories(h2_full+pipe_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13444,7 +13359,7 @@
 target_include_directories(h2_full+trace_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13474,7 +13389,7 @@
 target_include_directories(h2_full+workarounds_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13504,7 +13419,7 @@
 target_include_directories(h2_http_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13534,7 +13449,7 @@
 target_include_directories(h2_load_reporting_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13564,7 +13479,7 @@
 target_include_directories(h2_oauth2_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13594,7 +13509,7 @@
 target_include_directories(h2_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13624,7 +13539,7 @@
 target_include_directories(h2_sockpair_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13654,7 +13569,7 @@
 target_include_directories(h2_sockpair+trace_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13684,7 +13599,7 @@
 target_include_directories(h2_sockpair_1byte_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13714,7 +13629,7 @@
 target_include_directories(h2_ssl_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13744,7 +13659,7 @@
 target_include_directories(h2_ssl_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13775,7 +13690,7 @@
 target_include_directories(h2_uds_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13806,7 +13721,7 @@
 target_include_directories(inproc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13836,7 +13751,7 @@
 target_include_directories(h2_census_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13866,7 +13781,7 @@
 target_include_directories(h2_compress_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13897,7 +13812,7 @@
 target_include_directories(h2_fd_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13928,7 +13843,7 @@
 target_include_directories(h2_full_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13959,7 +13874,7 @@
 target_include_directories(h2_full+pipe_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13990,7 +13905,7 @@
 target_include_directories(h2_full+trace_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14020,7 +13935,7 @@
 target_include_directories(h2_full+workarounds_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14050,7 +13965,7 @@
 target_include_directories(h2_http_proxy_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14080,7 +13995,7 @@
 target_include_directories(h2_load_reporting_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14110,7 +14025,7 @@
 target_include_directories(h2_proxy_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14140,7 +14055,7 @@
 target_include_directories(h2_sockpair_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14170,7 +14085,7 @@
 target_include_directories(h2_sockpair+trace_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14200,7 +14115,7 @@
 target_include_directories(h2_sockpair_1byte_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14231,7 +14146,7 @@
 target_include_directories(h2_uds_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14262,7 +14177,7 @@
 target_include_directories(inproc_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14295,7 +14210,7 @@
 target_include_directories(resolver_component_test_unsecure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14338,7 +14253,7 @@
 target_include_directories(resolver_component_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14381,7 +14296,7 @@
 target_include_directories(resolver_component_tests_runner_invoker_unsecure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14424,7 +14339,7 @@
 target_include_directories(resolver_component_tests_runner_invoker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14465,7 +14380,7 @@
 target_include_directories(api_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14495,7 +14410,7 @@
 target_include_directories(client_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14525,7 +14440,7 @@
 target_include_directories(hpack_parser_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14555,7 +14470,7 @@
 target_include_directories(http_request_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14585,7 +14500,7 @@
 target_include_directories(http_response_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14615,7 +14530,7 @@
 target_include_directories(json_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14645,7 +14560,7 @@
 target_include_directories(nanopb_fuzzer_response_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14675,7 +14590,7 @@
 target_include_directories(nanopb_fuzzer_serverlist_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14705,7 +14620,7 @@
 target_include_directories(percent_decode_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14735,7 +14650,7 @@
 target_include_directories(percent_encode_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14765,7 +14680,7 @@
 target_include_directories(server_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14795,7 +14710,7 @@
 target_include_directories(ssl_server_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14825,7 +14740,7 @@
 target_include_directories(uri_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
diff --git a/Makefile b/Makefile
index 9dfd2a8..aabe135 100644
--- a/Makefile
+++ b/Makefile
@@ -980,9 +980,6 @@
 fling_server: $(BINDIR)/$(CONFIG)/fling_server
 fling_stream_test: $(BINDIR)/$(CONFIG)/fling_stream_test
 fling_test: $(BINDIR)/$(CONFIG)/fling_test
-gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
-gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
-gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 goaway_server_test: $(BINDIR)/$(CONFIG)/goaway_server_test
 gpr_avl_test: $(BINDIR)/$(CONFIG)/gpr_avl_test
 gpr_cmdline_test: $(BINDIR)/$(CONFIG)/gpr_cmdline_test
@@ -1185,6 +1182,9 @@
 transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test
 writes_per_rpc_test: $(BINDIR)/$(CONFIG)/writes_per_rpc_test
 public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89
+gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
+gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
+gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test
 boringssl_asn1_test: $(BINDIR)/$(CONFIG)/boringssl_asn1_test
 boringssl_base64_test: $(BINDIR)/$(CONFIG)/boringssl_base64_test
@@ -2187,7 +2187,7 @@
 tools: tools_c tools_cxx
 
 
-tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/check_epollexclusive $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
+tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/check_epollexclusive $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 
 tools_cxx: privatelibs_cxx
 
@@ -9843,102 +9843,6 @@
 endif
 
 
-GEN_HPACK_TABLES_SRC = \
-    tools/codegen/core/gen_hpack_tables.c \
-
-GEN_HPACK_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_HPACK_TABLES_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_hpack_tables: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_hpack_tables
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_hpack_tables.o:  $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
-
-deps_gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_HPACK_TABLES_OBJS:.o=.dep)
-endif
-endif
-
-
-GEN_LEGAL_METADATA_CHARACTERS_SRC = \
-    tools/codegen/core/gen_legal_metadata_characters.c \
-
-GEN_LEGAL_METADATA_CHARACTERS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_LEGAL_METADATA_CHARACTERS_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS)
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_LEGAL_METADATA_CHARACTERS_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_legal_metadata_characters.o: 
-
-deps_gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
-endif
-endif
-
-
-GEN_PERCENT_ENCODING_TABLES_SRC = \
-    tools/codegen/core/gen_percent_encoding_tables.c \
-
-GEN_PERCENT_ENCODING_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_PERCENT_ENCODING_TABLES_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS)
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_PERCENT_ENCODING_TABLES_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_percent_encoding_tables.o: 
-
-deps_gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
-endif
-endif
-
-
 GOAWAY_SERVER_TEST_SRC = \
     test/core/end2end/goaway_server_test.cc \
 
@@ -17431,6 +17335,102 @@
 endif
 
 
+GEN_HPACK_TABLES_SRC = \
+    tools/codegen/core/gen_hpack_tables.cc \
+
+GEN_HPACK_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_HPACK_TABLES_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_hpack_tables: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_hpack_tables
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_hpack_tables.o:  $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
+
+deps_gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_HPACK_TABLES_OBJS:.o=.dep)
+endif
+endif
+
+
+GEN_LEGAL_METADATA_CHARACTERS_SRC = \
+    tools/codegen/core/gen_legal_metadata_characters.cc \
+
+GEN_LEGAL_METADATA_CHARACTERS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_LEGAL_METADATA_CHARACTERS_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS)
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_LEGAL_METADATA_CHARACTERS_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_legal_metadata_characters.o: 
+
+deps_gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
+endif
+endif
+
+
+GEN_PERCENT_ENCODING_TABLES_SRC = \
+    tools/codegen/core/gen_percent_encoding_tables.cc \
+
+GEN_PERCENT_ENCODING_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_PERCENT_ENCODING_TABLES_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS)
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_PERCENT_ENCODING_TABLES_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_percent_encoding_tables.o: 
+
+deps_gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
+endif
+endif
+
+
 
 # boringssl needs an override to ensure that it does not include
 # system openssl headers regardless of other configuration
diff --git a/README.md b/README.md
index 048614a..53078e6 100644
--- a/README.md
+++ b/README.md
@@ -25,15 +25,15 @@
 
 Libraries in different languages may be in different states of development. We are seeking contributions for all of these libraries.
 
-| Language                | Source                              | Status  |
-|-------------------------|-------------------------------------|---------|
-| Shared C [core library] | [src/core](src/core)                | 1.6     |
-| C++                     | [src/cpp](src/cpp)                  | 1.6     |
-| Ruby                    | [src/ruby](src/ruby)                | 1.6     |
-| Python                  | [src/python](src/python)            | 1.6     |
-| PHP                     | [src/php](src/php)                  | 1.6     |
-| C#                      | [src/csharp](src/csharp)            | 1.6     |
-| Objective-C             | [src/objective-c](src/objective-c)  | 1.6     |
+| Language                | Source                              |
+|-------------------------|-------------------------------------|
+| Shared C [core library] | [src/core](src/core)                |
+| C++                     | [src/cpp](src/cpp)                  |
+| Ruby                    | [src/ruby](src/ruby)                |
+| Python                  | [src/python](src/python)            |
+| PHP                     | [src/php](src/php)                  |
+| C#                      | [src/csharp](src/csharp)            |
+| Objective-C             | [src/objective-c](src/objective-c)  |
 
 Java source code is in the [grpc-java](http://github.com/grpc/grpc-java)
 repository. Go source code is in the
diff --git a/WORKSPACE b/WORKSPACE
index bf09aa7..adce809 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -1,127 +1,4 @@
-bind(
-    name = "nanopb",
-    actual = "//third_party/nanopb",
-)
+workspace(name = "com_github_grpc_grpc")
 
-bind(
-    name = "libssl",
-    actual = "@boringssl//:ssl",
-)
-
-bind(
-    name = "zlib",
-    actual = "@com_github_madler_zlib//:z",
-)
-
-bind(
-    name = "protobuf",
-    actual = "@com_google_protobuf//:protobuf",
-)
-
-bind(
-    name = "protobuf_clib",
-    actual = "@com_google_protobuf//:protoc_lib",
-)
-
-bind(
-    name = "protobuf_headers",
-    actual = "@com_google_protobuf//:protobuf_headers",
-)
-
-bind(
-    name = "protocol_compiler",
-    actual = "@com_google_protobuf//:protoc",
-)
-
-bind(
-    name = "cares",
-    actual = "@com_github_cares_cares//:ares",
-)
-
-bind(
-    name = "gtest",
-    actual = "@com_github_google_googletest//:gtest",
-)
-
-bind(
-    name = "gmock",
-    actual = "@com_github_google_googletest//:gmock",
-)
-
-bind(
-    name = "benchmark",
-    actual = "@com_github_google_benchmark//:benchmark",
-)
-
-bind(
-    name = "gflags",
-    actual = "@com_github_gflags_gflags//:gflags",
-)
-
-http_archive(
-    name = "boringssl",
-    # on the master-with-bazel branch
-    url = "https://boringssl.googlesource.com/boringssl/+archive/886e7d75368e3f4fab3f4d0d3584e4abfc557755.tar.gz",
-)
-
-new_http_archive(
-    name = "com_github_madler_zlib",
-    build_file = "third_party/zlib.BUILD",
-    strip_prefix = "zlib-cacf7f1d4e3d44d871b605da3b647f07d718623f",
-    url = "https://github.com/madler/zlib/archive/cacf7f1d4e3d44d871b605da3b647f07d718623f.tar.gz",
-)
-
-http_archive(
-    name = "com_google_protobuf",
-    strip_prefix = "protobuf-2761122b810fe8861004ae785cc3ab39f384d342",
-    url = "https://github.com/google/protobuf/archive/2761122b810fe8861004ae785cc3ab39f384d342.tar.gz",
-)
-
-new_http_archive(
-    name = "com_github_google_googletest",
-    build_file = "third_party/gtest.BUILD",
-    strip_prefix = "googletest-ec44c6c1675c25b9827aacd08c02433cccde7780",
-    url = "https://github.com/google/googletest/archive/ec44c6c1675c25b9827aacd08c02433cccde7780.tar.gz",
-)
-
-http_archive(
-    name = "com_github_gflags_gflags",
-    strip_prefix = "gflags-30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e",
-    url = "https://github.com/gflags/gflags/archive/30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e.tar.gz",
-)
-
-new_http_archive(
-    name = "com_github_google_benchmark",
-    build_file = "third_party/benchmark.BUILD",
-    strip_prefix = "benchmark-5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8",
-    url = "https://github.com/google/benchmark/archive/5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8.tar.gz",
-)
-
-new_local_repository(
-    name = "cares_local_files",
-    build_file = "third_party/cares/cares_local_files.BUILD",
-    path = "third_party/cares",
-)
-
-new_http_archive(
-    name = "com_github_cares_cares",
-    build_file = "third_party/cares/cares.BUILD",
-    strip_prefix = "c-ares-3be1924221e1326df520f8498d704a5c4c8d0cce",
-    url = "https://github.com/c-ares/c-ares/archive/3be1924221e1326df520f8498d704a5c4c8d0cce.tar.gz",
-)
-
-http_archive(
-    name = "com_google_absl",
-    strip_prefix = "abseil-cpp-cc4bed2d74f7c8717e31f9579214ab52a9c9c610",
-    url = "https://github.com/abseil/abseil-cpp/archive/cc4bed2d74f7c8717e31f9579214ab52a9c9c610.tar.gz",
-)
-
-http_archive(
-    name = "bazel_toolchains",
-    urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
-        "https://github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
-    ],
-    strip_prefix = "bazel-toolchains-af4681c3d19f063f090222ec3d04108c4e0ca255",
-    sha256 = "d58bb2d6c8603f600d522b6104d6192a65339aa26cbba9f11ff5c4b36dedb928",
-)
+load("//bazel:grpc_deps.bzl", "grpc_deps")
+grpc_deps()
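
Editor's note: the trimmed WORKSPACE above now only names the workspace and delegates all third-party setup to grpc_deps(). As a rough sketch of how a downstream Bazel project would consume it (not part of this change; the commit hash and URL below are placeholders, not real coordinates):

    # Hypothetical consumer WORKSPACE -- illustrative only; archive coordinates are placeholders.
    workspace(name = "my_project")

    http_archive(
        name = "com_github_grpc_grpc",
        strip_prefix = "grpc-<commit>",                                  # placeholder
        url = "https://github.com/grpc/grpc/archive/<commit>.tar.gz",    # placeholder
    )

    load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
    grpc_deps()  # sets up boringssl, zlib, protobuf, c-ares, etc. and the //external:* binds
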
diff --git a/bazel/grpc_build_system.bzl b/bazel/grpc_build_system.bzl
index 0f92608..d146ca9 100644
--- a/bazel/grpc_build_system.bzl
+++ b/bazel/grpc_build_system.bzl
@@ -26,6 +26,27 @@
 # The set of pollers to test against if a test exercises polling
 POLLERS = ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv']
 
+def _get_external_deps(external_deps):
+  ret = []
+  for dep in external_deps:
+    if dep == "nanopb":
+      ret.append("//third_party/nanopb")
+    else:
+      ret.append("//external:" + dep)
+  return ret
+
+def _maybe_update_cc_library_hdrs(hdrs):
+  ret = []
+  hdrs_to_update = {
+      "third_party/objective_c/Cronet/bidirectional_stream_c.h": "//third_party:objective_c/Cronet/bidirectional_stream_c.h",
+  }
+  for h in hdrs:
+    if h in hdrs_to_update.keys():
+      ret.append(hdrs_to_update[h])
+    else:
+      ret.append(h)
+  return ret
+
 def grpc_cc_library(name, srcs = [], public_hdrs = [], hdrs = [],
                     external_deps = [], deps = [], standalone = False,
                     language = "C++", testonly = False, visibility = None,
@@ -40,8 +61,8 @@
                       "//conditions:default": [],}) +
               select({"//:remote_execution":  ["GRPC_PORT_ISOLATED_RUNTIME=1"],
                       "//conditions:default": [],}),
-    hdrs = hdrs + public_hdrs,
-    deps = deps + ["//external:" + dep for dep in external_deps],
+    hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
+    deps = deps + _get_external_deps(external_deps),
     copts = copts,
     visibility = visibility,
     testonly = testonly,
@@ -82,12 +103,12 @@
     'srcs': srcs,
     'args': args,
     'data': data,
-    'deps': deps + ["//external:" + dep for dep in external_deps],
+    'deps': deps + _get_external_deps(external_deps),
     'copts': copts,
     'linkopts': ["-pthread"],
   }
   if uses_polling:
-    native.cc_binary(testonly=True, **args)
+    native.cc_test(testonly=True, tags=['manual'], **args)
     for poller in POLLERS:
       native.sh_test(
         name = name + '@poller=' + poller,
@@ -114,7 +135,7 @@
     data = data,
     testonly = testonly,
     linkshared = linkshared,
-    deps = deps + ["//external:" + dep for dep in external_deps],
+    deps = deps + _get_external_deps(external_deps),
     copts = copts,
     linkopts = ["-pthread"] + linkopts,
   )
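
Editor's note: the new _get_external_deps helper above rewrites the short external_deps names into full labels, special-casing nanopb so it resolves inside the repository rather than through //external. A small illustration of the mapping it performs, per the definition above (the input list is made up for the example):

    # Sketch: what _get_external_deps returns for a hand-picked input list.
    deps = _get_external_deps(["nanopb", "libssl", "protobuf"])
    # deps == ["//third_party/nanopb", "//external:libssl", "//external:protobuf"]
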
diff --git a/bazel/grpc_deps.bzl b/bazel/grpc_deps.bzl
new file mode 100644
index 0000000..e465312
--- /dev/null
+++ b/bazel/grpc_deps.bzl
@@ -0,0 +1,129 @@
+"""Load dependencies needed to compile and test the grpc library as a 3rd-party consumer."""
+
+def grpc_deps():
+    """Loads dependencies need to compile and test the grpc library."""
+    native.bind(
+        name = "libssl",
+        actual = "@boringssl//:ssl",
+    )
+
+    native.bind(
+        name = "zlib",
+        actual = "@com_github_madler_zlib//:z",
+    )
+
+    native.bind(
+        name = "protobuf",
+        actual = "@com_google_protobuf//:protobuf",
+    )
+
+    native.bind(
+        name = "protobuf_clib",
+        actual = "@com_google_protobuf//:protoc_lib",
+    )
+
+    native.bind(
+        name = "protobuf_headers",
+        actual = "@com_google_protobuf//:protobuf_headers",
+    )
+
+    native.bind(
+        name = "protocol_compiler",
+        actual = "@com_google_protobuf//:protoc",
+    )
+
+    native.bind(
+        name = "cares",
+        actual = "@com_github_cares_cares//:ares",
+    )
+
+    native.bind(
+        name = "gtest",
+        actual = "@com_github_google_googletest//:gtest",
+    )
+
+    native.bind(
+        name = "gmock",
+        actual = "@com_github_google_googletest//:gmock",
+    )
+
+    native.bind(
+        name = "benchmark",
+        actual = "@com_github_google_benchmark//:benchmark",
+    )
+
+    native.bind(
+        name = "gflags",
+        actual = "@com_github_gflags_gflags//:gflags",
+    )
+
+    if "boringssl" not in native.existing_rules():
+        native.http_archive(
+            name = "boringssl",
+            # on the master-with-bazel branch
+            url = "https://boringssl.googlesource.com/boringssl/+archive/886e7d75368e3f4fab3f4d0d3584e4abfc557755.tar.gz",
+        )
+
+    if "com_github_madler_zlib" not in native.existing_rules():
+        native.new_http_archive(
+            name = "com_github_madler_zlib",
+            build_file = "@com_github_grpc_grpc//third_party:zlib.BUILD",
+            strip_prefix = "zlib-cacf7f1d4e3d44d871b605da3b647f07d718623f",
+            url = "https://github.com/madler/zlib/archive/cacf7f1d4e3d44d871b605da3b647f07d718623f.tar.gz",
+        )
+
+    if "com_google_protobuf" not in native.existing_rules():
+        native.http_archive(
+            name = "com_google_protobuf",
+            strip_prefix = "protobuf-2761122b810fe8861004ae785cc3ab39f384d342",
+            url = "https://github.com/google/protobuf/archive/2761122b810fe8861004ae785cc3ab39f384d342.tar.gz",
+        )
+
+    if "com_github_google_googletest" not in native.existing_rules():
+        native.new_http_archive(
+            name = "com_github_google_googletest",
+            build_file = "@com_github_grpc_grpc//third_party:gtest.BUILD",
+            strip_prefix = "googletest-ec44c6c1675c25b9827aacd08c02433cccde7780",
+            url = "https://github.com/google/googletest/archive/ec44c6c1675c25b9827aacd08c02433cccde7780.tar.gz",
+        )
+
+    if "com_github_gflags_gflags" not in native.existing_rules():
+        native.http_archive(
+            name = "com_github_gflags_gflags",
+            strip_prefix = "gflags-30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e",
+            url = "https://github.com/gflags/gflags/archive/30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e.tar.gz",
+        )
+
+    if "com_github_google_benchmark" not in native.existing_rules():
+        native.new_http_archive(
+            name = "com_github_google_benchmark",
+            build_file = "@com_github_grpc_grpc//third_party:benchmark.BUILD",
+            strip_prefix = "benchmark-5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8",
+            url = "https://github.com/google/benchmark/archive/5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8.tar.gz",
+        )
+
+    if "com_github_cares_cares" not in native.existing_rules():
+        native.new_http_archive(
+            name = "com_github_cares_cares",
+            build_file = "@com_github_grpc_grpc//third_party:cares/cares.BUILD",
+            strip_prefix = "c-ares-3be1924221e1326df520f8498d704a5c4c8d0cce",
+            url = "https://github.com/c-ares/c-ares/archive/3be1924221e1326df520f8498d704a5c4c8d0cce.tar.gz",
+        )
+
+    if "com_google_absl" not in native.existing_rules():
+        native.http_archive(
+            name = "com_google_absl",
+            strip_prefix = "abseil-cpp-cc4bed2d74f7c8717e31f9579214ab52a9c9c610",
+            url = "https://github.com/abseil/abseil-cpp/archive/cc4bed2d74f7c8717e31f9579214ab52a9c9c610.tar.gz",
+        )
+
+    if "com_github_bazelbuild_bazeltoolchains" not in native.existing_rules():
+        native.http_archive(
+            name = "com_github_bazelbuild_bazeltoolchains",
+            strip_prefix = "bazel-toolchains-af4681c3d19f063f090222ec3d04108c4e0ca255",
+            urls = [
+                "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
+                "https://github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
+            ],
+            sha256 = "d58bb2d6c8603f600d522b6104d6192a65339aa26cbba9f11ff5c4b36dedb928",
+        )
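
Editor's note: every archive in the new grpc_deps.bzl is guarded by native.existing_rules(), so a consumer can pin its own copy of a dependency simply by declaring it before calling grpc_deps(); grpc_deps() then skips its default definition. A sketch, assuming a consumer wants to pin protobuf (the commit below is a placeholder):

    # Hypothetical consumer WORKSPACE snippet -- the pinned commit is a placeholder.
    http_archive(
        name = "com_google_protobuf",
        strip_prefix = "protobuf-<your-pinned-commit>",
        url = "https://github.com/google/protobuf/archive/<your-pinned-commit>.tar.gz",
    )

    load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
    grpc_deps()  # finds com_google_protobuf already defined and leaves it alone
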
diff --git a/build.yaml b/build.yaml
index 6158b46..4f74c57 100644
--- a/build.yaml
+++ b/build.yaml
@@ -2121,28 +2121,6 @@
   - mac
   - linux
   - posix
-- name: gen_hpack_tables
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_hpack_tables.c
-  deps:
-  - gpr
-  - grpc
-  uses_polling: false
-- name: gen_legal_metadata_characters
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_legal_metadata_characters.c
-  deps: []
-- name: gen_percent_encoding_tables
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_percent_encoding_tables.c
-  deps: []
-  uses_polling: false
 - name: goaway_server_test
   cpu_cost: 0.1
   build: test
@@ -4846,6 +4824,28 @@
   deps:
   - grpc
   - gpr
+- name: gen_hpack_tables
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_hpack_tables.cc
+  deps:
+  - gpr
+  - grpc
+  uses_polling: false
+- name: gen_legal_metadata_characters
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_legal_metadata_characters.cc
+  deps: []
+- name: gen_percent_encoding_tables
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_percent_encoding_tables.cc
+  deps: []
+  uses_polling: false
 vspackages:
 - linkage: static
   name: grpc.dependencies.zlib
diff --git a/doc/PROTOCOL-HTTP2.md b/doc/PROTOCOL-HTTP2.md
index 29d3cc2..107a8e8 100644
--- a/doc/PROTOCOL-HTTP2.md
+++ b/doc/PROTOCOL-HTTP2.md
@@ -1,7 +1,7 @@
 # gRPC over HTTP2
 
 ## Introduction
-This document serves as a detailed description for an implementation of gRPC carried over HTTP2 draft 17 framing. It assumes familiarity with the HTTP2 specification.
+This document serves as a detailed description for an implementation of gRPC carried over <a href="https://tools.ietf.org/html/rfc7540">HTTP2 framing</a>. It assumes familiarity with the HTTP2 specification.
 
 ## Protocol
 Production rules are using <a href="http://tools.ietf.org/html/rfc5234">ABNF syntax</a>.
@@ -24,7 +24,7 @@
 * **Call-Definition** → Method Scheme Path TE [Authority] [Timeout] Content-Type [Message-Type] [Message-Encoding] [Message-Accept-Encoding] [User-Agent]
 * **Method** →  ":method POST"
 * **Scheme** → ":scheme "  ("http" / "https")
-* **Path** → ":path" "/" Service-Name "/" {_method name_}
+* **Path** → ":path" "/" Service-Name "/" {_method name_}  # But see note below.
 * **Service-Name** → {_IDL-specific service name_}
 * **Authority** → ":authority" {_virtual host name of authority_}
 * **TE** → "te" "trailers"  # Used to detect incompatible proxies
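
Editor's note: the comment added to the Path production above points to a caveat elsewhere in the spec, but the basic shape of the rule is simple: the :path pseudo-header is "/" Service-Name "/" method-name. A small illustration in plain Python, using the well-known helloworld Greeter service as the example:

    # Illustrative only: building the :path pseudo-header from the production rule above.
    def grpc_path(service_name, method_name):
        # Path -> ":path" "/" Service-Name "/" {method name}
        return "/" + service_name + "/" + method_name

    assert grpc_path("helloworld.Greeter", "SayHello") == "/helloworld.Greeter/SayHello"
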
@@ -170,6 +170,7 @@
 grpc-status = 0 # OK
 trace-proto-bin = jher831yy13JHy3hc
 ```
+
 #### User Agents
 
 While the protocol does not require a user-agent to function it is recommended that clients provide a structured user-agent string that provides a basic description of the calling library, version & platform to facilitate issue diagnosis in heterogeneous environments. The following structure is recommended to library developers
@@ -197,7 +198,7 @@
 #### HTTP2 Transport Mapping
 
 ##### Stream Identification
-All GRPC calls need to specify an internal ID. We will use HTTP2 stream-ids as call identifiers in this scheme. NOTE: These id’s are contextual to an open HTTP2 session and will not be unique within a given process that is handling more than one HTTP2 session nor can they be used as GUIDs.
+All GRPC calls need to specify an internal ID. We will use HTTP2 stream-ids as call identifiers in this scheme. NOTE: These ids are contextual to an open HTTP2 session and will not be unique within a given process that is handling more than one HTTP2 session nor can they be used as GUIDs.
 
 ##### Data Frames
 DATA frame boundaries have no relation to **Length-Prefixed-Message** boundaries and implementations should make no assumptions about their alignment.
@@ -232,6 +233,7 @@
 The HTTP2 specification mandates the use of TLS 1.2 or higher when TLS is used with HTTP2. It also places some additional constraints on the allowed ciphers in deployments to avoid known-problems as well as requiring SNI support. It is also expected that HTTP2 will be used in conjunction with proprietary transport security mechanisms about which the specification can make no meaningful recommendations.
 
 ##### Connection Management
+
 ###### GOAWAY Frame
 Sent by servers to clients to indicate that they will no longer accept any new streams on the associated connections. This frame includes the id of the last successfully accepted stream by the server. Clients should consider any stream initiated after the last successfully accepted stream as UNAVAILABLE and retry the call elsewhere. Clients are free to continue working with the already accepted streams until they complete or the connection is terminated.
 
diff --git a/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj b/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
index 6b26be1..3bff4a5 100644
--- a/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
+++ b/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
@@ -6,14 +6,13 @@
     <DebugType>portable</DebugType>
     <AssemblyName>Greeter</AssemblyName>
     <PackageId>Greeter</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
-    <PackageReference Include="Google.Protobuf" Version="3.2.0" />
-    <PackageReference Include="Google.Protobuf.Tools" Version="3.2.0" />
-    <PackageReference Include="Grpc" Version="1.2.2" />
-    <PackageReference Include="Grpc.Tools" Version="1.2.2" />
+    <PackageReference Include="Google.Protobuf" Version="3.5.0" />
+    <PackageReference Include="Google.Protobuf.Tools" Version="3.5.0" />
+    <PackageReference Include="Grpc" Version="1.8.0" />
+    <PackageReference Include="Grpc.Tools" Version="1.8.0" />
   </ItemGroup>
 
 </Project>
diff --git a/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs b/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
index 8168b28..c808884 100644
--- a/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
+++ b/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj b/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
index 24cacfc..d1ed040 100644
--- a/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
+++ b/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
@@ -7,7 +7,6 @@
     <AssemblyName>GreeterClient</AssemblyName>
     <OutputType>Exe</OutputType>
     <PackageId>GreeterClient</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
diff --git a/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj b/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
index f7980fa..159fbd8 100644
--- a/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
+++ b/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
@@ -7,7 +7,6 @@
     <AssemblyName>GreeterServer</AssemblyName>
     <OutputType>Exe</OutputType>
     <PackageId>GreeterServer</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
diff --git a/examples/csharp/helloworld-from-cli/generate_protos.bat b/examples/csharp/helloworld-from-cli/generate_protos.bat
index e290be6..dcf6084 100644
--- a/examples/csharp/helloworld-from-cli/generate_protos.bat
+++ b/examples/csharp/helloworld-from-cli/generate_protos.bat
@@ -19,8 +19,8 @@
 @rem enter this directory
 cd /d %~dp0
 
-set PROTOC=%UserProfile%\.nuget\packages\Google.Protobuf.Tools\3.2.0\tools\windows_x64\protoc.exe
-set PLUGIN=%UserProfile%\.nuget\packages\Grpc.Tools\1.2.2\tools\windows_x64\grpc_csharp_plugin.exe
+set PROTOC=%UserProfile%\.nuget\packages\Google.Protobuf.Tools\3.5.0\tools\windows_x64\protoc.exe
+set PLUGIN=%UserProfile%\.nuget\packages\Grpc.Tools\1.8.0\tools\windows_x64\grpc_csharp_plugin.exe
 
 %PROTOC% -I../../protos --csharp_out Greeter  ../../protos/helloworld.proto --grpc_out Greeter --plugin=protoc-gen-grpc=%PLUGIN%
 
diff --git a/examples/csharp/helloworld/Greeter/Greeter.csproj b/examples/csharp/helloworld/Greeter/Greeter.csproj
index 8dcd2d9..d2597f1 100644
--- a/examples/csharp/helloworld/Greeter/Greeter.csproj
+++ b/examples/csharp/helloworld/Greeter/Greeter.csproj
@@ -32,12 +32,12 @@
     <ConsolePause>false</ConsolePause>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -62,11 +62,11 @@
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
index 8168b28..c808884 100644
--- a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
+++ b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/helloworld/Greeter/packages.config b/examples/csharp/helloworld/Greeter/packages.config
index ec83cd8..38297f8 100644
--- a/examples/csharp/helloworld/Greeter/packages.config
+++ b/examples/csharp/helloworld/Greeter/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Tools" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Tools" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
index 4b6b1b3..470749a 100644
--- a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
+++ b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
@@ -32,12 +32,12 @@
     <Externalconsole>true</Externalconsole>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -60,11 +60,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/packages.config b/examples/csharp/helloworld/GreeterClient/packages.config
index b912fd4..4b3684e 100644
--- a/examples/csharp/helloworld/GreeterClient/packages.config
+++ b/examples/csharp/helloworld/GreeterClient/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
index 97978fa..82e2961 100644
--- a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
+++ b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
@@ -32,12 +32,12 @@
     <Externalconsole>true</Externalconsole>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -60,11 +60,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/packages.config b/examples/csharp/helloworld/GreeterServer/packages.config
index b912fd4..4b3684e 100644
--- a/examples/csharp/helloworld/GreeterServer/packages.config
+++ b/examples/csharp/helloworld/GreeterServer/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/generate_protos.bat b/examples/csharp/helloworld/generate_protos.bat
index f955470..45b097e 100644
--- a/examples/csharp/helloworld/generate_protos.bat
+++ b/examples/csharp/helloworld/generate_protos.bat
@@ -19,7 +19,7 @@
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.1.2.2\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.1.8.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out Greeter  ../../protos/helloworld.proto --grpc_out Greeter --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
index 360444e..e66e986 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
@@ -32,12 +32,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -75,12 +75,12 @@
     </None>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
index 26278ea..765d5d5 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/route_guide/RouteGuide/packages.config b/examples/csharp/route_guide/RouteGuide/packages.config
index 2dde11f..fe2c995 100644
--- a/examples/csharp/route_guide/RouteGuide/packages.config
+++ b/examples/csharp/route_guide/RouteGuide/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
index 162eaed..612f60c 100644
--- a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
+++ b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
@@ -34,12 +34,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -72,12 +72,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideClient/packages.config b/examples/csharp/route_guide/RouteGuideClient/packages.config
index 2dde11f..fe2c995 100644
--- a/examples/csharp/route_guide/RouteGuideClient/packages.config
+++ b/examples/csharp/route_guide/RouteGuideClient/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
index b6f2f35..4d9d9d7 100644
--- a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
+++ b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
@@ -34,12 +34,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -73,12 +73,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideServer/packages.config b/examples/csharp/route_guide/RouteGuideServer/packages.config
index 46df645..2bb1f0d 100644
--- a/examples/csharp/route_guide/RouteGuideServer/packages.config
+++ b/examples/csharp/route_guide/RouteGuideServer/packages.config
@@ -1,9 +1,9 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Tools" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Tools" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/generate_protos.bat b/examples/csharp/route_guide/generate_protos.bat
index 7311683..a8c9cb5 100644
--- a/examples/csharp/route_guide/generate_protos.bat
+++ b/examples/csharp/route_guide/generate_protos.bat
@@ -19,7 +19,7 @@
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.1.2.2\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.1.8.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out RouteGuide  ../../protos/route_guide.proto --grpc_out RouteGuide --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py
index d9b2bdf..a0aeb47 100644
--- a/examples/python/helloworld/greeter_client.py
+++ b/examples/python/helloworld/greeter_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the GRPC helloworld.Greeter client."""
 
 from __future__ import print_function
@@ -23,11 +22,11 @@
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  stub = helloworld_pb2_grpc.GreeterStub(channel)
-  response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
-  print("Greeter client received: " + response.message)
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = helloworld_pb2_grpc.GreeterStub(channel)
+    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + response.message)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py
index be61695..c355662 100644
--- a/examples/python/helloworld/greeter_server.py
+++ b/examples/python/helloworld/greeter_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the GRPC helloworld.Greeter server."""
 
 from concurrent import futures
@@ -27,20 +26,21 @@
 
 class Greeter(helloworld_pb2_grpc.GreeterServicer):
 
-  def SayHello(self, request, context):
-    return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
+    def SayHello(self, request, context):
+        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
 
 if __name__ == '__main__':
-  serve()
+    serve()
diff --git a/examples/python/interceptors/default_value/default_value_client_interceptor.py b/examples/python/interceptors/default_value/default_value_client_interceptor.py
new file mode 100644
index 0000000..c549f2b
--- /dev/null
+++ b/examples/python/interceptors/default_value/default_value_client_interceptor.py
@@ -0,0 +1,68 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interceptor that adds headers to outgoing requests."""
+
+import collections
+
+import grpc
+
+
+class _ConcreteValue(grpc.Future):
+
+    def __init__(self, result):
+        self._result = result
+
+    def cancel(self):
+        return False
+
+    def cancelled(self):
+        return False
+
+    def running(self):
+        return False
+
+    def done(self):
+        return True
+
+    def result(self, timeout=None):
+        return self._result
+
+    def exception(self, timeout=None):
+        return None
+
+    def traceback(self, timeout=None):
+        return None
+
+    def add_done_callback(self, fn):
+        fn(self._result)
+
+
+class DefaultValueClientInterceptor(grpc.UnaryUnaryClientInterceptor,
+                                    grpc.StreamUnaryClientInterceptor):
+
+    def __init__(self, value):
+        self._default = _ConcreteValue(value)
+
+    def _intercept_call(self, continuation, client_call_details,
+                        request_or_iterator):
+        response = continuation(client_call_details, request_or_iterator)
+        return self._default if response.exception() else response
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        return self._intercept_call(continuation, client_call_details, request)
+
+    def intercept_stream_unary(self, continuation, client_call_details,
+                               request_iterator):
+        return self._intercept_call(continuation, client_call_details,
+                                    request_iterator)
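
The interceptor above relies on the call object handed back by a continuation behaving like a grpc.Future: _intercept_call swaps in the pre-built _ConcreteValue whenever the continuation reports an exception, so callers still get something whose result() succeeds. A minimal sketch of that fallback path, driven without a channel; the _FailingCall stub and the hard-coded reply below are illustrative only and not part of this change:

    # Illustrative only: exercise the interceptor with a continuation that fails.
    import grpc

    import helloworld_pb2
    import default_value_client_interceptor


    class _FailingCall(grpc.Future):
        """Stand-in for a completed call that ended in an error."""

        def cancel(self):
            return False

        def cancelled(self):
            return False

        def running(self):
            return False

        def done(self):
            return True

        def result(self, timeout=None):
            raise RuntimeError('RPC failed')

        def exception(self, timeout=None):
            return RuntimeError('RPC failed')

        def traceback(self, timeout=None):
            return None

        def add_done_callback(self, fn):
            fn(self)


    interceptor = default_value_client_interceptor.DefaultValueClientInterceptor(
        helloworld_pb2.HelloReply(message='Hello from your local interceptor!'))
    response = interceptor.intercept_unary_unary(
        lambda call_details, request: _FailingCall(), None, None)
    print(response.result().message)  # falls back to the default reply
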
diff --git a/examples/python/interceptors/default_value/greeter_client.py b/examples/python/interceptors/default_value/greeter_client.py
new file mode 100644
index 0000000..aba7571
--- /dev/null
+++ b/examples/python/interceptors/default_value/greeter_client.py
@@ -0,0 +1,38 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The Python implementation of the gRPC helloworld.Greeter client."""
+
+from __future__ import print_function
+
+import grpc
+
+import helloworld_pb2
+import helloworld_pb2_grpc
+import default_value_client_interceptor
+
+
+def run():
+    default_value = helloworld_pb2.HelloReply(
+        message='Hello from your local interceptor!')
+    default_value_interceptor = default_value_client_interceptor.DefaultValueClientInterceptor(
+        default_value)
+    channel = grpc.insecure_channel('localhost:50051')
+    channel = grpc.intercept_channel(channel, default_value_interceptor)
+    stub = helloworld_pb2_grpc.GreeterStub(channel)
+    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + response.message)
+
+
+if __name__ == '__main__':
+    run()
diff --git a/examples/python/interceptors/default_value/helloworld_pb2.py b/examples/python/interceptors/default_value/helloworld_pb2.py
new file mode 100644
index 0000000..e18ab9a
--- /dev/null
+++ b/examples/python/interceptors/default_value/helloworld_pb2.py
@@ -0,0 +1,134 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: helloworld.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='helloworld.proto',
+  package='helloworld',
+  syntax='proto3',
+  serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
+)
+
+
+
+
+_HELLOREQUEST = _descriptor.Descriptor(
+  name='HelloRequest',
+  full_name='helloworld.HelloRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='helloworld.HelloRequest.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=32,
+  serialized_end=60,
+)
+
+
+_HELLOREPLY = _descriptor.Descriptor(
+  name='HelloReply',
+  full_name='helloworld.HelloReply',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='helloworld.HelloReply.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=62,
+  serialized_end=91,
+)
+
+DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
+DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
+  DESCRIPTOR = _HELLOREQUEST,
+  __module__ = 'helloworld_pb2'
+  # @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
+  ))
+_sym_db.RegisterMessage(HelloRequest)
+
+HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
+  DESCRIPTOR = _HELLOREPLY,
+  __module__ = 'helloworld_pb2'
+  # @@protoc_insertion_point(class_scope:helloworld.HelloReply)
+  ))
+_sym_db.RegisterMessage(HelloReply)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
+
+_GREETER = _descriptor.ServiceDescriptor(
+  name='Greeter',
+  full_name='helloworld.Greeter',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=93,
+  serialized_end=166,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='SayHello',
+    full_name='helloworld.Greeter.SayHello',
+    index=0,
+    containing_service=None,
+    input_type=_HELLOREQUEST,
+    output_type=_HELLOREPLY,
+    options=None,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_GREETER)
+
+DESCRIPTOR.services_by_name['Greeter'] = _GREETER
+
+# @@protoc_insertion_point(module_scope)
diff --git a/examples/python/interceptors/default_value/helloworld_pb2_grpc.py b/examples/python/interceptors/default_value/helloworld_pb2_grpc.py
new file mode 100644
index 0000000..18e07d1
--- /dev/null
+++ b/examples/python/interceptors/default_value/helloworld_pb2_grpc.py
@@ -0,0 +1,46 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import helloworld_pb2 as helloworld__pb2
+
+
+class GreeterStub(object):
+  """The greeting service definition.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.SayHello = channel.unary_unary(
+        '/helloworld.Greeter/SayHello',
+        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
+        response_deserializer=helloworld__pb2.HelloReply.FromString,
+        )
+
+
+class GreeterServicer(object):
+  """The greeting service definition.
+  """
+
+  def SayHello(self, request, context):
+    """Sends a greeting
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_GreeterServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'SayHello': grpc.unary_unary_rpc_method_handler(
+          servicer.SayHello,
+          request_deserializer=helloworld__pb2.HelloRequest.FromString,
+          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'helloworld.Greeter', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
diff --git a/examples/python/interceptors/headers/generic_client_interceptor.py b/examples/python/interceptors/headers/generic_client_interceptor.py
new file mode 100644
index 0000000..30b0755
--- /dev/null
+++ b/examples/python/interceptors/headers/generic_client_interceptor.py
@@ -0,0 +1,55 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Base class for interceptors that operate on all RPC types."""
+
+import grpc
+
+
+class _GenericClientInterceptor(
+        grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor,
+        grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor):
+
+    def __init__(self, interceptor_function):
+        self._fn = interceptor_function
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, iter((request,)), False, False)
+        response = continuation(new_details, next(new_request_iterator))
+        return postprocess(response) if postprocess else response
+
+    def intercept_unary_stream(self, continuation, client_call_details,
+                               request):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, iter((request,)), False, True)
+        response_it = continuation(new_details, new_request_iterator)
+        return postprocess(response_it) if postprocess else response_it
+
+    def intercept_stream_unary(self, continuation, client_call_details,
+                               request_iterator):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, request_iterator, True, False)
+        response = continuation(new_details, next(new_request_iterator))
+        return postprocess(response) if postprocess else response
+
+    def intercept_stream_stream(self, continuation, client_call_details,
+                                request_iterator):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, request_iterator, True, True)
+        response_it = continuation(new_details, new_request_iterator)
+        return postprocess(response_it) if postprocess else response_it
+
+
+def create(intercept_call):
+    return _GenericClientInterceptor(intercept_call)
diff --git a/examples/python/interceptors/headers/greeter_client.py b/examples/python/interceptors/headers/greeter_client.py
new file mode 100644
index 0000000..2b0dd3e
--- /dev/null
+++ b/examples/python/interceptors/headers/greeter_client.py
@@ -0,0 +1,36 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The Python implementation of the GRPC helloworld.Greeter client."""
+
+from __future__ import print_function
+
+import grpc
+
+import helloworld_pb2
+import helloworld_pb2_grpc
+import header_manipulator_client_interceptor
+
+
+def run():
+    header_adder_interceptor = header_manipulator_client_interceptor.header_adder_interceptor(
+        'one-time-password', '42')
+    channel = grpc.insecure_channel('localhost:50051')
+    channel = grpc.intercept_channel(channel, header_adder_interceptor)
+    stub = helloworld_pb2_grpc.GreeterStub(channel)
+    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + response.message)
+
+
+if __name__ == '__main__':
+    run()
diff --git a/examples/python/interceptors/headers/greeter_server.py b/examples/python/interceptors/headers/greeter_server.py
new file mode 100644
index 0000000..0196860
--- /dev/null
+++ b/examples/python/interceptors/headers/greeter_server.py
@@ -0,0 +1,52 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The Python implementation of the GRPC helloworld.Greeter server."""
+
+from concurrent import futures
+import time
+
+import grpc
+
+import helloworld_pb2
+import helloworld_pb2_grpc
+from request_header_validator_interceptor import RequestHeaderValidatorInterceptor
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+
+class Greeter(helloworld_pb2_grpc.GreeterServicer):
+
+    def SayHello(self, request, context):
+        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
+
+
+def serve():
+    header_validator = RequestHeaderValidatorInterceptor(
+        'one-time-password', '42', grpc.StatusCode.UNAUTHENTICATED,
+        'Access denied!')
+    server = grpc.server(
+        futures.ThreadPoolExecutor(max_workers=10),
+        interceptors=(header_validator,))
+    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
+
+if __name__ == '__main__':
+    serve()
diff --git a/examples/python/interceptors/headers/header_manipulator_client_interceptor.py b/examples/python/interceptors/headers/header_manipulator_client_interceptor.py
new file mode 100644
index 0000000..ac7c605
--- /dev/null
+++ b/examples/python/interceptors/headers/header_manipulator_client_interceptor.py
@@ -0,0 +1,42 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interceptor that adds headers to outgoing requests."""
+
+import collections
+
+import grpc
+import generic_client_interceptor
+
+
+class _ClientCallDetails(
+        collections.namedtuple('_ClientCallDetails',
+                               ('method', 'timeout', 'metadata',
+                                'credentials')), grpc.ClientCallDetails):
+    pass
+
+
+def header_adder_interceptor(header, value):
+
+    def intercept_call(client_call_details, request_iterator, request_streaming,
+                       response_streaming):
+        metadata = []
+        if client_call_details.metadata is not None:
+            metadata = list(client_call_details.metadata)
+        metadata.append((header, value,))
+        client_call_details = _ClientCallDetails(
+            client_call_details.method, client_call_details.timeout, metadata,
+            client_call_details.credentials)
+        return client_call_details, request_iterator, None
+
+    return generic_client_interceptor.create(intercept_call)
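
header_adder_interceptor never touches the request itself; it only rewrites the ClientCallDetails and lets _GenericClientInterceptor route the call. A small sketch of that plumbing with a fake continuation; the _Details class and the 'authorization' header are illustrative and not part of this change:

    # Illustrative only: the continuation observes the injected metadata entry.
    import header_manipulator_client_interceptor


    class _Details(object):
        method = '/helloworld.Greeter/SayHello'
        timeout = None
        metadata = None
        credentials = None


    def _continuation(call_details, request):
        print(call_details.metadata)  # [('authorization', 'Bearer some-token')]
        return request


    interceptor = header_manipulator_client_interceptor.header_adder_interceptor(
        'authorization', 'Bearer some-token')
    interceptor.intercept_unary_unary(_continuation, _Details(), 'ping')
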
diff --git a/examples/python/interceptors/headers/helloworld_pb2.py b/examples/python/interceptors/headers/helloworld_pb2.py
new file mode 100644
index 0000000..e18ab9a
--- /dev/null
+++ b/examples/python/interceptors/headers/helloworld_pb2.py
@@ -0,0 +1,134 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: helloworld.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='helloworld.proto',
+  package='helloworld',
+  syntax='proto3',
+  serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
+)
+
+
+
+
+_HELLOREQUEST = _descriptor.Descriptor(
+  name='HelloRequest',
+  full_name='helloworld.HelloRequest',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='helloworld.HelloRequest.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=32,
+  serialized_end=60,
+)
+
+
+_HELLOREPLY = _descriptor.Descriptor(
+  name='HelloReply',
+  full_name='helloworld.HelloReply',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='helloworld.HelloReply.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=62,
+  serialized_end=91,
+)
+
+DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
+DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
+  DESCRIPTOR = _HELLOREQUEST,
+  __module__ = 'helloworld_pb2'
+  # @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
+  ))
+_sym_db.RegisterMessage(HelloRequest)
+
+HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
+  DESCRIPTOR = _HELLOREPLY,
+  __module__ = 'helloworld_pb2'
+  # @@protoc_insertion_point(class_scope:helloworld.HelloReply)
+  ))
+_sym_db.RegisterMessage(HelloReply)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
+
+_GREETER = _descriptor.ServiceDescriptor(
+  name='Greeter',
+  full_name='helloworld.Greeter',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=93,
+  serialized_end=166,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='SayHello',
+    full_name='helloworld.Greeter.SayHello',
+    index=0,
+    containing_service=None,
+    input_type=_HELLOREQUEST,
+    output_type=_HELLOREPLY,
+    options=None,
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_GREETER)
+
+DESCRIPTOR.services_by_name['Greeter'] = _GREETER
+
+# @@protoc_insertion_point(module_scope)
diff --git a/examples/python/interceptors/headers/helloworld_pb2_grpc.py b/examples/python/interceptors/headers/helloworld_pb2_grpc.py
new file mode 100644
index 0000000..18e07d1
--- /dev/null
+++ b/examples/python/interceptors/headers/helloworld_pb2_grpc.py
@@ -0,0 +1,46 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import helloworld_pb2 as helloworld__pb2
+
+
+class GreeterStub(object):
+  """The greeting service definition.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.SayHello = channel.unary_unary(
+        '/helloworld.Greeter/SayHello',
+        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
+        response_deserializer=helloworld__pb2.HelloReply.FromString,
+        )
+
+
+class GreeterServicer(object):
+  """The greeting service definition.
+  """
+
+  def SayHello(self, request, context):
+    """Sends a greeting
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_GreeterServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'SayHello': grpc.unary_unary_rpc_method_handler(
+          servicer.SayHello,
+          request_deserializer=helloworld__pb2.HelloRequest.FromString,
+          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'helloworld.Greeter', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
diff --git a/examples/python/interceptors/headers/request_header_validator_interceptor.py b/examples/python/interceptors/headers/request_header_validator_interceptor.py
new file mode 100644
index 0000000..95af417
--- /dev/null
+++ b/examples/python/interceptors/headers/request_header_validator_interceptor.py
@@ -0,0 +1,39 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interceptor that ensures a specific header is present."""
+
+import grpc
+
+
+def _unary_unary_rpc_terminator(code, details):
+
+    def terminate(ignored_request, context):
+        context.abort(code, details)
+
+    return grpc.unary_unary_rpc_method_handler(terminate)
+
+
+class RequestHeaderValidatorInterceptor(grpc.ServerInterceptor):
+
+    def __init__(self, header, value, code, details):
+        self._header = header
+        self._value = value
+        self._terminator = _unary_unary_rpc_terminator(code, details)
+
+    def intercept_service(self, continuation, handler_call_details):
+        if (self._header,
+                self._value) in handler_call_details.invocation_metadata:
+            return continuation(handler_call_details)
+        else:
+            return self._terminator
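
Since the validator only inspects handler_call_details.invocation_metadata, a client does not need the header-adder interceptor from this change to get past it; attaching the metadata directly to the call works as well. A minimal sketch, assuming the greeter_server.py above is listening on localhost:50051:

    # Illustrative only: satisfy the validator without a client interceptor.
    import grpc

    import helloworld_pb2
    import helloworld_pb2_grpc

    channel = grpc.insecure_channel('localhost:50051')
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    response = stub.SayHello(
        helloworld_pb2.HelloRequest(name='you'),
        metadata=(('one-time-password', '42'),))
    print("Greeter client received: " + response.message)
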
diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py
index c8c700a..49713f3 100644
--- a/examples/python/multiplex/multiplex_client.py
+++ b/examples/python/multiplex/multiplex_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A client that makes both Greeter and RouteGuide RPCs."""
 
 from __future__ import print_function
@@ -29,98 +28,99 @@
 
 
 def make_route_note(message, latitude, longitude):
-  return route_guide_pb2.RouteNote(
-      message=message,
-      location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
+    return route_guide_pb2.RouteNote(
+        message=message,
+        location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
 
 
 def guide_get_one_feature(route_guide_stub, point):
-  feature = route_guide_stub.GetFeature(point)
-  if not feature.location:
-    print("Server returned incomplete feature")
-    return
+    feature = route_guide_stub.GetFeature(point)
+    if not feature.location:
+        print("Server returned incomplete feature")
+        return
 
-  if feature.name:
-    print("Feature called %s at %s" % (feature.name, feature.location))
-  else:
-    print("Found no feature at %s" % feature.location)
+    if feature.name:
+        print("Feature called %s at %s" % (feature.name, feature.location))
+    else:
+        print("Found no feature at %s" % feature.location)
 
 
 def guide_get_feature(route_guide_stub):
-  guide_get_one_feature(
-      route_guide_stub,
-      route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
-  guide_get_one_feature(
-      route_guide_stub, route_guide_pb2.Point(latitude=0, longitude=0))
+    guide_get_one_feature(
+        route_guide_stub,
+        route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
+    guide_get_one_feature(route_guide_stub,
+                          route_guide_pb2.Point(latitude=0, longitude=0))
 
 
 def guide_list_features(route_guide_stub):
-  rectangle = route_guide_pb2.Rectangle(
-      lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
-      hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
-  print("Looking for features between 40, -75 and 42, -73")
+    rectangle = route_guide_pb2.Rectangle(
+        lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
+        hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
+    print("Looking for features between 40, -75 and 42, -73")
 
-  features = route_guide_stub.ListFeatures(rectangle)
+    features = route_guide_stub.ListFeatures(rectangle)
 
-  for feature in features:
-    print("Feature called %s at %s" % (feature.name, feature.location))
+    for feature in features:
+        print("Feature called %s at %s" % (feature.name, feature.location))
 
 
 def generate_route(feature_list):
-  for _ in range(0, 10):
-    random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
-    print("Visiting point %s" % random_feature.location)
-    yield random_feature.location
-    time.sleep(random.uniform(0.5, 1.5))
+    for _ in range(0, 10):
+        random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
+        print("Visiting point %s" % random_feature.location)
+        yield random_feature.location
+        time.sleep(random.uniform(0.5, 1.5))
 
 
 def guide_record_route(route_guide_stub):
-  feature_list = route_guide_resources.read_route_guide_database()
+    feature_list = route_guide_resources.read_route_guide_database()
 
-  route_iterator = generate_route(feature_list)
-  route_summary = route_guide_stub.RecordRoute(route_iterator)
-  print("Finished trip with %s points " % route_summary.point_count)
-  print("Passed %s features " % route_summary.feature_count)
-  print("Travelled %s meters " % route_summary.distance)
-  print("It took %s seconds " % route_summary.elapsed_time)
+    route_iterator = generate_route(feature_list)
+    route_summary = route_guide_stub.RecordRoute(route_iterator)
+    print("Finished trip with %s points " % route_summary.point_count)
+    print("Passed %s features " % route_summary.feature_count)
+    print("Travelled %s meters " % route_summary.distance)
+    print("It took %s seconds " % route_summary.elapsed_time)
 
 
 def generate_messages():
-  messages = [
-      make_route_note("First message", 0, 0),
-      make_route_note("Second message", 0, 1),
-      make_route_note("Third message", 1, 0),
-      make_route_note("Fourth message", 0, 0),
-      make_route_note("Fifth message", 1, 0),
-  ]
-  for msg in messages:
-    print("Sending %s at %s" % (msg.message, msg.location))
-    yield msg
-    time.sleep(random.uniform(0.5, 1.0))
+    messages = [
+        make_route_note("First message", 0, 0),
+        make_route_note("Second message", 0, 1),
+        make_route_note("Third message", 1, 0),
+        make_route_note("Fourth message", 0, 0),
+        make_route_note("Fifth message", 1, 0),
+    ]
+    for msg in messages:
+        print("Sending %s at %s" % (msg.message, msg.location))
+        yield msg
+        time.sleep(random.uniform(0.5, 1.0))
 
 
 def guide_route_chat(route_guide_stub):
-  responses = route_guide_stub.RouteChat(generate_messages())
-  for response in responses:
-    print("Received message %s at %s" % (response.message, response.location))
+    responses = route_guide_stub.RouteChat(generate_messages())
+    for response in responses:
+        print("Received message %s at %s" %
+              (response.message, response.location))
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
-  route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)
-  greeter_response = greeter_stub.SayHello(
-      helloworld_pb2.HelloRequest(name='you'))
-  print("Greeter client received: " + greeter_response.message)
-  print("-------------- GetFeature --------------")
-  guide_get_feature(route_guide_stub)
-  print("-------------- ListFeatures --------------")
-  guide_list_features(route_guide_stub)
-  print("-------------- RecordRoute --------------")
-  guide_record_route(route_guide_stub)
-  print("-------------- RouteChat --------------")
-  guide_route_chat(route_guide_stub)
+    channel = grpc.insecure_channel('localhost:50051')
+    greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
+    route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)
+    greeter_response = greeter_stub.SayHello(
+        helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + greeter_response.message)
+    print("-------------- GetFeature --------------")
+    guide_get_feature(route_guide_stub)
+    print("-------------- ListFeatures --------------")
+    guide_list_features(route_guide_stub)
+    print("-------------- RecordRoute --------------")
+    guide_record_route(route_guide_stub)
+    print("-------------- RouteChat --------------")
+    guide_route_chat(route_guide_stub)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py
index 9a6e835..e2ff671 100644
--- a/examples/python/multiplex/multiplex_server.py
+++ b/examples/python/multiplex/multiplex_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A gRPC server servicing both Greeter and RouteGuide RPCs."""
 
 from concurrent import futures
@@ -30,107 +29,111 @@
 
 
 def _get_feature(feature_db, point):
-  """Returns Feature at given location or None."""
-  for feature in feature_db:
-    if feature.location == point:
-      return feature
-  return None
+    """Returns Feature at given location or None."""
+    for feature in feature_db:
+        if feature.location == point:
+            return feature
+    return None
 
 
 def _get_distance(start, end):
-  """Distance between two points."""
-  coord_factor = 10000000.0
-  lat_1 = start.latitude / coord_factor
-  lat_2 = end.latitude / coord_factor
-  lon_1 = start.longitude / coord_factor
-  lon_2 = end.longitude / coord_factor
-  lat_rad_1 = math.radians(lat_1)
-  lat_rad_2 = math.radians(lat_2)
-  delta_lat_rad = math.radians(lat_2 - lat_1)
-  delta_lon_rad = math.radians(lon_2 - lon_1)
+    """Distance between two points."""
+    coord_factor = 10000000.0
+    lat_1 = start.latitude / coord_factor
+    lat_2 = end.latitude / coord_factor
+    lon_1 = start.longitude / coord_factor
+    lon_2 = end.longitude / coord_factor
+    lat_rad_1 = math.radians(lat_1)
+    lat_rad_2 = math.radians(lat_2)
+    delta_lat_rad = math.radians(lat_2 - lat_1)
+    delta_lon_rad = math.radians(lon_2 - lon_1)
 
-  a = (pow(math.sin(delta_lat_rad / 2), 2) +
-       (math.cos(lat_rad_1) * math.cos(lat_rad_2) *
-        pow(math.sin(delta_lon_rad / 2), 2)))
-  c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
-  R = 6371000; # metres
-  return R * c;
+    a = (pow(math.sin(delta_lat_rad / 2), 2) +
+         (math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(
+             math.sin(delta_lon_rad / 2), 2)))
+    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+    R = 6371000
+    # metres
+    return R * c
 
 
 class _GreeterServicer(helloworld_pb2_grpc.GreeterServicer):
 
-  def SayHello(self, request, context):
-    return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name))
+    def SayHello(self, request, context):
+        return helloworld_pb2.HelloReply(
+            message='Hello, {}!'.format(request.name))
 
 
 class _RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
-  """Provides methods that implement functionality of route guide server."""
+    """Provides methods that implement functionality of route guide server."""
 
-  def __init__(self):
-    self.db = route_guide_resources.read_route_guide_database()
+    def __init__(self):
+        self.db = route_guide_resources.read_route_guide_database()
 
-  def GetFeature(self, request, context):
-    feature = _get_feature(self.db, request)
-    if feature is None:
-      return route_guide_pb2.Feature(name="", location=request)
-    else:
-      return feature
+    def GetFeature(self, request, context):
+        feature = _get_feature(self.db, request)
+        if feature is None:
+            return route_guide_pb2.Feature(name="", location=request)
+        else:
+            return feature
 
-  def ListFeatures(self, request, context):
-    left = min(request.lo.longitude, request.hi.longitude)
-    right = max(request.lo.longitude, request.hi.longitude)
-    top = max(request.lo.latitude, request.hi.latitude)
-    bottom = min(request.lo.latitude, request.hi.latitude)
-    for feature in self.db:
-      if (feature.location.longitude >= left and
-          feature.location.longitude <= right and
-          feature.location.latitude >= bottom and
-          feature.location.latitude <= top):
-        yield feature
+    def ListFeatures(self, request, context):
+        left = min(request.lo.longitude, request.hi.longitude)
+        right = max(request.lo.longitude, request.hi.longitude)
+        top = max(request.lo.latitude, request.hi.latitude)
+        bottom = min(request.lo.latitude, request.hi.latitude)
+        for feature in self.db:
+            if (feature.location.longitude >= left and
+                    feature.location.longitude <= right and
+                    feature.location.latitude >= bottom and
+                    feature.location.latitude <= top):
+                yield feature
 
-  def RecordRoute(self, request_iterator, context):
-    point_count = 0
-    feature_count = 0
-    distance = 0.0
-    prev_point = None
+    def RecordRoute(self, request_iterator, context):
+        point_count = 0
+        feature_count = 0
+        distance = 0.0
+        prev_point = None
 
-    start_time = time.time()
-    for point in request_iterator:
-      point_count += 1
-      if _get_feature(self.db, point):
-        feature_count += 1
-      if prev_point:
-        distance += _get_distance(prev_point, point)
-      prev_point = point
+        start_time = time.time()
+        for point in request_iterator:
+            point_count += 1
+            if _get_feature(self.db, point):
+                feature_count += 1
+            if prev_point:
+                distance += _get_distance(prev_point, point)
+            prev_point = point
 
-    elapsed_time = time.time() - start_time
-    return route_guide_pb2.RouteSummary(point_count=point_count,
-                                        feature_count=feature_count,
-                                        distance=int(distance),
-                                        elapsed_time=int(elapsed_time))
+        elapsed_time = time.time() - start_time
+        return route_guide_pb2.RouteSummary(
+            point_count=point_count,
+            feature_count=feature_count,
+            distance=int(distance),
+            elapsed_time=int(elapsed_time))
 
-  def RouteChat(self, request_iterator, context):
-    prev_notes = []
-    for new_note in request_iterator:
-      for prev_note in prev_notes:
-        if prev_note.location == new_note.location:
-          yield prev_note
-      prev_notes.append(new_note)
+    def RouteChat(self, request_iterator, context):
+        prev_notes = []
+        for new_note in request_iterator:
+            for prev_note in prev_notes:
+                if prev_note.location == new_note.location:
+                    yield prev_note
+            prev_notes.append(new_note)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server)
-  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
-      _RouteGuideServicer(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(),
+                                                      server)
+    route_guide_pb2_grpc.add_RouteGuideServicer_to_server(_RouteGuideServicer(),
+                                                          server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
 
 
 if __name__ == '__main__':
-  serve()
+    serve()
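
For reference, the reformatted _get_distance above is the standard haversine great-circle distance; in the code's variables (lat_rad_1 = φ1, lat_rad_2 = φ2, delta_lat_rad = Δφ, delta_lon_rad = Δλ) it evaluates:

    a = \sin^2(\Delta\varphi/2) + \cos\varphi_1 \,\cos\varphi_2 \,\sin^2(\Delta\lambda/2)
    c = 2\,\operatorname{atan2}\!\left(\sqrt{a},\ \sqrt{1-a}\right)
    d = R\,c, \qquad R = 6\,371\,000\ \text{m}
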
diff --git a/examples/python/multiplex/route_guide_resources.py b/examples/python/multiplex/route_guide_resources.py
index 0887863..ace85d6 100644
--- a/examples/python/multiplex/route_guide_resources.py
+++ b/examples/python/multiplex/route_guide_resources.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common resources used in the gRPC route guide example."""
 
 import json
@@ -20,19 +19,19 @@
 
 
 def read_route_guide_database():
-  """Reads the route guide database.
+    """Reads the route guide database.
 
   Returns:
     The full contents of the route guide database as a sequence of
       route_guide_pb2.Features.
   """
-  feature_list = []
-  with open("route_guide_db.json") as route_guide_db_file:
-    for item in json.load(route_guide_db_file):
-      feature = route_guide_pb2.Feature(
-          name=item["name"],
-          location=route_guide_pb2.Point(
-              latitude=item["location"]["latitude"],
-              longitude=item["location"]["longitude"]))
-      feature_list.append(feature)
-  return feature_list
+    feature_list = []
+    with open("route_guide_db.json") as route_guide_db_file:
+        for item in json.load(route_guide_db_file):
+            feature = route_guide_pb2.Feature(
+                name=item["name"],
+                location=route_guide_pb2.Point(
+                    latitude=item["location"]["latitude"],
+                    longitude=item["location"]["longitude"]))
+            feature_list.append(feature)
+    return feature_list
diff --git a/examples/python/multiplex/run_codegen.py b/examples/python/multiplex/run_codegen.py
index f38d86c..d960c3c 100644
--- a/examples/python/multiplex/run_codegen.py
+++ b/examples/python/multiplex/run_codegen.py
@@ -11,26 +11,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Generates protocol messages and gRPC stubs."""
 
 from grpc_tools import protoc
 
-protoc.main(
-    (
-        '',
-        '-I../../protos',
-        '--python_out=.',
-        '--grpc_python_out=.',
-        '../../protos/helloworld.proto',
-    )
-)
-protoc.main(
-    (
-        '',
-        '-I../../protos',
-        '--python_out=.',
-        '--grpc_python_out=.',
-        '../../protos/route_guide.proto',
-    )
-)
+protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
+             '../../protos/helloworld.proto',))
+protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
+             '../../protos/route_guide.proto',))
diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py
index a0e32fb..c9d0e96 100644
--- a/examples/python/route_guide/route_guide_client.py
+++ b/examples/python/route_guide/route_guide_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the gRPC route guide client."""
 
 from __future__ import print_function
@@ -26,89 +25,91 @@
 
 
 def make_route_note(message, latitude, longitude):
-  return route_guide_pb2.RouteNote(
-      message=message,
-      location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
+    return route_guide_pb2.RouteNote(
+        message=message,
+        location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
 
 
 def guide_get_one_feature(stub, point):
-  feature = stub.GetFeature(point)
-  if not feature.location:
-    print("Server returned incomplete feature")
-    return
+    feature = stub.GetFeature(point)
+    if not feature.location:
+        print("Server returned incomplete feature")
+        return
 
-  if feature.name:
-    print("Feature called %s at %s" % (feature.name, feature.location))
-  else:
-    print("Found no feature at %s" % feature.location)
+    if feature.name:
+        print("Feature called %s at %s" % (feature.name, feature.location))
+    else:
+        print("Found no feature at %s" % feature.location)
 
 
 def guide_get_feature(stub):
-  guide_get_one_feature(stub, route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
-  guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
+    guide_get_one_feature(
+        stub, route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
+    guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
 
 
 def guide_list_features(stub):
-  rectangle = route_guide_pb2.Rectangle(
-      lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
-      hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
-  print("Looking for features between 40, -75 and 42, -73")
+    rectangle = route_guide_pb2.Rectangle(
+        lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
+        hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
+    print("Looking for features between 40, -75 and 42, -73")
 
-  features = stub.ListFeatures(rectangle)
+    features = stub.ListFeatures(rectangle)
 
-  for feature in features:
-    print("Feature called %s at %s" % (feature.name, feature.location))
+    for feature in features:
+        print("Feature called %s at %s" % (feature.name, feature.location))
 
 
 def generate_route(feature_list):
-  for _ in range(0, 10):
-    random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
-    print("Visiting point %s" % random_feature.location)
-    yield random_feature.location
+    for _ in range(0, 10):
+        random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
+        print("Visiting point %s" % random_feature.location)
+        yield random_feature.location
 
 
 def guide_record_route(stub):
-  feature_list = route_guide_resources.read_route_guide_database()
+    feature_list = route_guide_resources.read_route_guide_database()
 
-  route_iterator = generate_route(feature_list)
-  route_summary = stub.RecordRoute(route_iterator)
-  print("Finished trip with %s points " % route_summary.point_count)
-  print("Passed %s features " % route_summary.feature_count)
-  print("Travelled %s meters " % route_summary.distance)
-  print("It took %s seconds " % route_summary.elapsed_time)
+    route_iterator = generate_route(feature_list)
+    route_summary = stub.RecordRoute(route_iterator)
+    print("Finished trip with %s points " % route_summary.point_count)
+    print("Passed %s features " % route_summary.feature_count)
+    print("Travelled %s meters " % route_summary.distance)
+    print("It took %s seconds " % route_summary.elapsed_time)
 
 
 def generate_messages():
-  messages = [
-      make_route_note("First message", 0, 0),
-      make_route_note("Second message", 0, 1),
-      make_route_note("Third message", 1, 0),
-      make_route_note("Fourth message", 0, 0),
-      make_route_note("Fifth message", 1, 0),
-  ]
-  for msg in messages:
-    print("Sending %s at %s" % (msg.message, msg.location))
-    yield msg
+    messages = [
+        make_route_note("First message", 0, 0),
+        make_route_note("Second message", 0, 1),
+        make_route_note("Third message", 1, 0),
+        make_route_note("Fourth message", 0, 0),
+        make_route_note("Fifth message", 1, 0),
+    ]
+    for msg in messages:
+        print("Sending %s at %s" % (msg.message, msg.location))
+        yield msg
 
 
 def guide_route_chat(stub):
-  responses = stub.RouteChat(generate_messages())
-  for response in responses:
-    print("Received message %s at %s" % (response.message, response.location))
+    responses = stub.RouteChat(generate_messages())
+    for response in responses:
+        print("Received message %s at %s" %
+              (response.message, response.location))
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  stub = route_guide_pb2_grpc.RouteGuideStub(channel)
-  print("-------------- GetFeature --------------")
-  guide_get_feature(stub)
-  print("-------------- ListFeatures --------------")
-  guide_list_features(stub)
-  print("-------------- RecordRoute --------------")
-  guide_record_route(stub)
-  print("-------------- RouteChat --------------")
-  guide_route_chat(stub)
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = route_guide_pb2_grpc.RouteGuideStub(channel)
+    print("-------------- GetFeature --------------")
+    guide_get_feature(stub)
+    print("-------------- ListFeatures --------------")
+    guide_list_features(stub)
+    print("-------------- RecordRoute --------------")
+    guide_record_route(stub)
+    print("-------------- RouteChat --------------")
+    guide_route_chat(stub)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/route_guide/route_guide_resources.py b/examples/python/route_guide/route_guide_resources.py
index 0887863..ace85d6 100644
--- a/examples/python/route_guide/route_guide_resources.py
+++ b/examples/python/route_guide/route_guide_resources.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common resources used in the gRPC route guide example."""
 
 import json
@@ -20,19 +19,19 @@
 
 
 def read_route_guide_database():
-  """Reads the route guide database.
+    """Reads the route guide database.
 
   Returns:
     The full contents of the route guide database as a sequence of
       route_guide_pb2.Features.
   """
-  feature_list = []
-  with open("route_guide_db.json") as route_guide_db_file:
-    for item in json.load(route_guide_db_file):
-      feature = route_guide_pb2.Feature(
-          name=item["name"],
-          location=route_guide_pb2.Point(
-              latitude=item["location"]["latitude"],
-              longitude=item["location"]["longitude"]))
-      feature_list.append(feature)
-  return feature_list
+    feature_list = []
+    with open("route_guide_db.json") as route_guide_db_file:
+        for item in json.load(route_guide_db_file):
+            feature = route_guide_pb2.Feature(
+                name=item["name"],
+                location=route_guide_pb2.Point(
+                    latitude=item["location"]["latitude"],
+                    longitude=item["location"]["longitude"]))
+            feature_list.append(feature)
+    return feature_list
diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py
index a0aa5fd..46f3322 100644
--- a/examples/python/route_guide/route_guide_server.py
+++ b/examples/python/route_guide/route_guide_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the gRPC route guide server."""
 
 from concurrent import futures
@@ -28,98 +27,102 @@
 
 
 def get_feature(feature_db, point):
-  """Returns Feature at given location or None."""
-  for feature in feature_db:
-    if feature.location == point:
-      return feature
-  return None
+    """Returns Feature at given location or None."""
+    for feature in feature_db:
+        if feature.location == point:
+            return feature
+    return None
 
 
 def get_distance(start, end):
-  """Distance between two points."""
-  coord_factor = 10000000.0
-  lat_1 = start.latitude / coord_factor
-  lat_2 = end.latitude / coord_factor
-  lon_1 = start.longitude / coord_factor
-  lon_2 = end.longitude / coord_factor
-  lat_rad_1 = math.radians(lat_1)
-  lat_rad_2 = math.radians(lat_2)
-  delta_lat_rad = math.radians(lat_2 - lat_1)
-  delta_lon_rad = math.radians(lon_2 - lon_1)
+    """Distance between two points."""
+    coord_factor = 10000000.0
+    lat_1 = start.latitude / coord_factor
+    lat_2 = end.latitude / coord_factor
+    lon_1 = start.longitude / coord_factor
+    lon_2 = end.longitude / coord_factor
+    lat_rad_1 = math.radians(lat_1)
+    lat_rad_2 = math.radians(lat_2)
+    delta_lat_rad = math.radians(lat_2 - lat_1)
+    delta_lon_rad = math.radians(lon_2 - lon_1)
 
-  a = (pow(math.sin(delta_lat_rad / 2), 2) +
-       (math.cos(lat_rad_1) * math.cos(lat_rad_2) *
-        pow(math.sin(delta_lon_rad / 2), 2)))
-  c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
-  R = 6371000; # metres
-  return R * c;
+    a = (pow(math.sin(delta_lat_rad / 2), 2) +
+         (math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(
+             math.sin(delta_lon_rad / 2), 2)))
+    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+    R = 6371000
+    # metres
+    return R * c
+
 
 class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
-  """Provides methods that implement functionality of route guide server."""
+    """Provides methods that implement functionality of route guide server."""
 
-  def __init__(self):
-    self.db = route_guide_resources.read_route_guide_database()
+    def __init__(self):
+        self.db = route_guide_resources.read_route_guide_database()
 
-  def GetFeature(self, request, context):
-    feature = get_feature(self.db, request)
-    if feature is None:
-      return route_guide_pb2.Feature(name="", location=request)
-    else:
-      return feature
+    def GetFeature(self, request, context):
+        feature = get_feature(self.db, request)
+        if feature is None:
+            return route_guide_pb2.Feature(name="", location=request)
+        else:
+            return feature
 
-  def ListFeatures(self, request, context):
-    left = min(request.lo.longitude, request.hi.longitude)
-    right = max(request.lo.longitude, request.hi.longitude)
-    top = max(request.lo.latitude, request.hi.latitude)
-    bottom = min(request.lo.latitude, request.hi.latitude)
-    for feature in self.db:
-      if (feature.location.longitude >= left and
-          feature.location.longitude <= right and
-          feature.location.latitude >= bottom and
-          feature.location.latitude <= top):
-        yield feature
+    def ListFeatures(self, request, context):
+        left = min(request.lo.longitude, request.hi.longitude)
+        right = max(request.lo.longitude, request.hi.longitude)
+        top = max(request.lo.latitude, request.hi.latitude)
+        bottom = min(request.lo.latitude, request.hi.latitude)
+        for feature in self.db:
+            if (feature.location.longitude >= left and
+                    feature.location.longitude <= right and
+                    feature.location.latitude >= bottom and
+                    feature.location.latitude <= top):
+                yield feature
 
-  def RecordRoute(self, request_iterator, context):
-    point_count = 0
-    feature_count = 0
-    distance = 0.0
-    prev_point = None
+    def RecordRoute(self, request_iterator, context):
+        point_count = 0
+        feature_count = 0
+        distance = 0.0
+        prev_point = None
 
-    start_time = time.time()
-    for point in request_iterator:
-      point_count += 1
-      if get_feature(self.db, point):
-        feature_count += 1
-      if prev_point:
-        distance += get_distance(prev_point, point)
-      prev_point = point
+        start_time = time.time()
+        for point in request_iterator:
+            point_count += 1
+            if get_feature(self.db, point):
+                feature_count += 1
+            if prev_point:
+                distance += get_distance(prev_point, point)
+            prev_point = point
 
-    elapsed_time = time.time() - start_time
-    return route_guide_pb2.RouteSummary(point_count=point_count,
-                                        feature_count=feature_count,
-                                        distance=int(distance),
-                                        elapsed_time=int(elapsed_time))
+        elapsed_time = time.time() - start_time
+        return route_guide_pb2.RouteSummary(
+            point_count=point_count,
+            feature_count=feature_count,
+            distance=int(distance),
+            elapsed_time=int(elapsed_time))
 
-  def RouteChat(self, request_iterator, context):
-    prev_notes = []
-    for new_note in request_iterator:
-      for prev_note in prev_notes:
-        if prev_note.location == new_note.location:
-          yield prev_note
-      prev_notes.append(new_note)
+    def RouteChat(self, request_iterator, context):
+        prev_notes = []
+        for new_note in request_iterator:
+            for prev_note in prev_notes:
+                if prev_note.location == new_note.location:
+                    yield prev_note
+            prev_notes.append(new_note)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
-      RouteGuideServicer(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    route_guide_pb2_grpc.add_RouteGuideServicer_to_server(RouteGuideServicer(),
+                                                          server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
 
 if __name__ == '__main__':
-  serve()
+    serve()
diff --git a/examples/python/route_guide/run_codegen.py b/examples/python/route_guide/run_codegen.py
index 4b61cf4..1ec7fcd 100644
--- a/examples/python/route_guide/run_codegen.py
+++ b/examples/python/route_guide/run_codegen.py
@@ -11,17 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
 
 from grpc_tools import protoc
 
-protoc.main(
-    (
-	'',
-	'-I../../protos',
-	'--python_out=.',
-	'--grpc_python_out=.',
-	'../../protos/route_guide.proto',
-    )
-)
+protoc.main(('', '-I../../protos', '--python_out=.', '--grpc_python_out=.',
+             '../../protos/route_guide.proto',))
diff --git a/include/grpc/support/log.h b/include/grpc/support/log.h
index 9cce4b1..a8371cb 100644
--- a/include/grpc/support/log.h
+++ b/include/grpc/support/log.h
@@ -73,12 +73,14 @@
 /** Log overrides: applications can use this API to intercept logging calls
    and use their own implementations */
 
-typedef struct {
+struct gpr_log_func_args {
   const char* file;
   int line;
   gpr_log_severity severity;
   const char* message;
-} gpr_log_func_args;
+};
+
+typedef struct gpr_log_func_args gpr_log_func_args;
 
 typedef void (*gpr_log_func)(gpr_log_func_args* args);
 GPRAPI void gpr_set_log_function(gpr_log_func func);
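
Note: giving the struct a tag (gpr_log_func_args) keeps the existing typedef working while allowing the type to be forward-declared. As a rough sketch of the override hook this header describes (not part of this change; the function name and output format are purely illustrative), an application-supplied logger might look like:

#include <stdio.h>

#include <grpc/support/log.h>

/* Illustrative only: forward every gpr log record to stderr. */
static void my_stderr_logger(gpr_log_func_args* args) {
  fprintf(stderr, "[sev=%d] %s:%d: %s\n", (int)args->severity, args->file,
          args->line, args->message);
}

/* Installed once, early in process startup: */
/* gpr_set_log_function(my_stderr_logger); */
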
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index dec8cd0..965d91b 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -1383,6 +1383,7 @@
                  "std::unique_ptr< $ns$$Service$::Stub> $ns$$Service$::NewStub("
                  "const std::shared_ptr< ::grpc::ChannelInterface>& channel, "
                  "const ::grpc::StubOptions& options) {\n"
+                 "  (void)options;\n"
                  "  std::unique_ptr< $ns$$Service$::Stub> stub(new "
                  "$ns$$Service$::Stub(channel));\n"
                  "  return stub;\n"
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index 40fe0b0..7c97056 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -659,8 +659,11 @@
     }
 
     // Write out a file header.
-    out.Print("// Generated by the protocol buffer compiler.  DO NOT EDIT!\n");
-    out.Print("// source: $filename$\n", "filename", file->name());
+    out.Print("// <auto-generated>\n");
+    out.Print(
+        "//     Generated by the protocol buffer compiler.  DO NOT EDIT!\n");
+    out.Print("//     source: $filename$\n", "filename", file->name());
+    out.Print("// </auto-generated>\n");
 
     // use C++ style as there are no file-level XML comments in .NET
     grpc::string leading_comments = GetCsharpComments(file, true);
diff --git a/src/core/lib/compression/stream_compression_gzip.cc b/src/core/lib/compression/stream_compression_gzip.cc
index 9d829b3..897f391 100644
--- a/src/core/lib/compression/stream_compression_gzip.cc
+++ b/src/core/lib/compression/stream_compression_gzip.cc
@@ -114,7 +114,8 @@
     if (ctx->zs.avail_out == 0) {
       grpc_slice_buffer_add(out, slice_out);
     } else if (ctx->zs.avail_out < slice_size) {
-      slice_out.data.refcounted.length -= ctx->zs.avail_out;
+      size_t len = GRPC_SLICE_LENGTH(slice_out);
+      GRPC_SLICE_SET_LENGTH(slice_out, len - ctx->zs.avail_out);
       grpc_slice_buffer_add(out, slice_out);
     } else {
       grpc_slice_unref_internal(slice_out);
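
Note: the removed line reached into slice_out.data.refcounted.length, which is only meaningful for refcounted slices; the replacement trims through the public length macros, which also handle inlined slices. A minimal sketch of the same trimming pattern, assuming a slice with at least "unused" spare bytes at the end (the helper name is illustrative):

#include <grpc/slice.h>

/* Drop `unused` bytes from the end of a slice, e.g. output space zlib did
   not fill. Valid for both inlined and refcounted slices. */
static grpc_slice trim_tail(grpc_slice s, size_t unused) {
  size_t len = GRPC_SLICE_LENGTH(s);
  GRPC_SLICE_SET_LENGTH(s, len - unused);
  return s;
}
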
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index d9e8a30..ae9d47e 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -738,7 +738,7 @@
       }
 
       if (gpr_cv_wait(&worker->cv, &pollset->mu,
-                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
+                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
           worker->state == UNKICKED) {
         /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
            received a kick */
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 006e3dd..53de94f 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1471,7 +1471,7 @@
       decref_poll_result(result);
       // Leave this polling thread alive for a grace period to do another poll()
       // op
-      gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+      gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
       deadline = gpr_time_add(deadline, thread_grace);
       pargs->trigger_set = 0;
       gpr_cv_wait(&pargs->trigger, &g_cvfds.mu, deadline);
@@ -1526,9 +1526,9 @@
     }
   }
 
-  gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+  gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
   if (timeout < 0) {
-    deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+    deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
   } else {
     deadline =
         gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
@@ -1631,7 +1631,7 @@
   // Not doing so will result in reported memory leaks
   if (!gpr_unref(&g_cvfds.pollcount)) {
     int res = gpr_cv_wait(&g_cvfds.shutdown_cv, &g_cvfds.mu,
-                          gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                          gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                        gpr_time_from_seconds(3, GPR_TIMESPAN)));
     GPR_ASSERT(res == 0);
   }
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index b45223c..b7288d5 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -155,7 +155,7 @@
     ts->depth -= subtract_depth;
     while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
       ts->queued_long_job = false;
-      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
     }
     if (ts->shutdown) {
       if (executor_trace.enabled()) {
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index dacf08e..70807c4 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -118,7 +118,7 @@
           abort();
         }
         gpr_timespec short_deadline =
-            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+            gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_time_from_millis(100, GPR_TIMESPAN));
         if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
           if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) >
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index cabe28e..eaf2f5d 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -507,6 +507,7 @@
     gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
   }
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+  gpr_mu_lock(&resource_user->mu);
   GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
   GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
   resource_user->reclaimers[0] = nullptr;
@@ -516,6 +517,7 @@
   if (resource_user->allocating) {
     rq_step_sched(resource_user->resource_quota);
   }
+  gpr_mu_unlock(&resource_user->mu);
 }
 
 static void ru_destroy(void* ru, grpc_error* error) {
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 15062a5..24ccab1 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -236,65 +236,68 @@
   GRPC_CLOSURE_SCHED(closure, error);
 }
 
-static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
-                                    grpc_pollset_set* interested_parties,
-                                    const grpc_channel_args* channel_args,
-                                    const grpc_resolved_address* addr,
-                                    grpc_millis deadline) {
-  int fd;
+grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args,
+                                       const grpc_resolved_address* addr,
+                                       grpc_resolved_address* mapped_addr,
+                                       grpc_fd** fdobj) {
   grpc_dualstack_mode dsmode;
-  int err;
-  async_connect* ac;
-  grpc_resolved_address addr6_v4mapped;
-  grpc_resolved_address addr4_copy;
-  grpc_fd* fdobj;
+  int fd;
+  grpc_error* error;
   char* name;
   char* addr_str;
-  grpc_error* error;
-
-  *ep = nullptr;
-
-  /* Use dualstack sockets where available. */
-  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
-    addr = &addr6_v4mapped;
+  *fdobj = nullptr;
+  /* Use dualstack sockets where available. Set mapped_addr to v6, or to v4
+     mapped to v6. */
+  if (!grpc_sockaddr_to_v4mapped(addr, mapped_addr)) {
+    /* addr is v4 mapped to v6 or v6. */
+    memcpy(mapped_addr, addr, sizeof(*mapped_addr));
   }
-
-  error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
+  error =
+      grpc_create_dualstack_socket(mapped_addr, SOCK_STREAM, 0, &dsmode, &fd);
   if (error != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(closure, error);
-    return;
+    return error;
   }
   if (dsmode == GRPC_DSMODE_IPV4) {
-    /* If we got an AF_INET socket, map the address back to IPv4. */
-    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
-    addr = &addr4_copy;
+    /* Original addr is either v4 or v4 mapped to v6. Set mapped_addr to v4. */
+    if (!grpc_sockaddr_is_v4mapped(addr, mapped_addr)) {
+      memcpy(mapped_addr, addr, sizeof(*mapped_addr));
+    }
   }
-  if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(closure, error);
-    return;
+  if ((error = prepare_socket(mapped_addr, fd, channel_args)) !=
+      GRPC_ERROR_NONE) {
+    return error;
   }
+  addr_str = grpc_sockaddr_to_uri(mapped_addr);
+  gpr_asprintf(&name, "tcp-client:%s", addr_str);
+  *fdobj = grpc_fd_create(fd, name);
+  gpr_free(name);
+  gpr_free(addr_str);
+  return GRPC_ERROR_NONE;
+}
 
+void grpc_tcp_client_create_from_prepared_fd(
+    grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj,
+    const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
+    grpc_millis deadline, grpc_endpoint** ep) {
+  const int fd = grpc_fd_wrapped_fd(fdobj);
+  int err;
+  async_connect* ac;
   do {
     GPR_ASSERT(addr->len < ~(socklen_t)0);
     err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
   } while (err < 0 && errno == EINTR);
-
-  addr_str = grpc_sockaddr_to_uri(addr);
-  gpr_asprintf(&name, "tcp-client:%s", addr_str);
-
-  fdobj = grpc_fd_create(fd, name);
-
   if (err >= 0) {
+    char* addr_str = grpc_sockaddr_to_uri(addr);
     *ep = grpc_tcp_client_create_from_fd(fdobj, channel_args, addr_str);
+    gpr_free(addr_str);
     GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
-    goto done;
+    return;
   }
-
   if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
     grpc_fd_orphan(fdobj, nullptr, nullptr, false /* already_closed */,
                    "tcp_client_connect_error");
     GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect"));
-    goto done;
+    return;
   }
 
   grpc_pollset_set_add_fd(interested_parties, fdobj);
@@ -304,8 +307,7 @@
   ac->ep = ep;
   ac->fd = fdobj;
   ac->interested_parties = interested_parties;
-  ac->addr_str = addr_str;
-  addr_str = nullptr;
+  ac->addr_str = grpc_sockaddr_to_uri(addr);
   gpr_mu_init(&ac->mu);
   ac->refs = 2;
   GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac,
@@ -322,10 +324,25 @@
   grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm);
   grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
   gpr_mu_unlock(&ac->mu);
+}
 
-done:
-  gpr_free(name);
-  gpr_free(addr_str);
+static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
+                                    grpc_pollset_set* interested_parties,
+                                    const grpc_channel_args* channel_args,
+                                    const grpc_resolved_address* addr,
+                                    grpc_millis deadline) {
+  grpc_resolved_address mapped_addr;
+  grpc_fd* fdobj = nullptr;
+  grpc_error* error;
+  *ep = nullptr;
+  if ((error = grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr,
+                                          &fdobj)) != GRPC_ERROR_NONE) {
+    GRPC_CLOSURE_SCHED(closure, error);
+    return;
+  }
+  grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fdobj,
+                                          channel_args, &mapped_addr, deadline,
+                                          ep);
 }
 
 // overridden by api_fuzzer.c
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index 7d0f133..57e50a6 100644
--- a/src/core/lib/iomgr/tcp_client_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -23,7 +23,44 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/tcp_client.h"
 
+/* Create an endpoint from a connected grpc_fd.
+
+   fd: a connected FD. Ownership is taken.
+   channel_args: may contain custom settings for the endpoint
+   addr_str: destination address in printable format
+   Returns: a new endpoint
+*/
 grpc_endpoint* grpc_tcp_client_create_from_fd(
     grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str);
 
+/* Return a configured, unbound, unconnected TCP client grpc_fd.
+
+   channel_args: may contain custom settings for the fd
+   addr: the destination address
+   mapped_addr: out parameter. addr mapped to an address appropriate to the
+     type of socket FD created. For example, if addr is IPv4 and dual stack
+     sockets are available, mapped_addr will be an IPv4-mapped IPv6 address
+   fdobj: out parameter. The new FD
+   Returns: error, if any. Out parameters are not set on error
+*/
+grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args,
+                                       const grpc_resolved_address* addr,
+                                       grpc_resolved_address* mapped_addr,
+                                       grpc_fd** fdobj);
+
+/* Connect a configured TCP client grpc_fd.
+
+   interested_parties: a set of pollsets that would be interested in this
+     connection being established (in order to continue their work)
+   closure: called when complete. On success, *ep will be set.
+   fdobj: an FD returned from grpc_tcp_client_prepare_fd(). Ownership is taken
+   channel_args: may contain custom settings for the endpoint
+   deadline: connection deadline
+   ep: out parameter. Set before closure is called if successful
+*/
+void grpc_tcp_client_create_from_prepared_fd(
+    grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj,
+    const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
+    grpc_millis deadline, grpc_endpoint** ep);
+
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
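
Note: together these two entry points split address/socket preparation from the actual connect, which is exactly how the reworked tcp_client_connect_impl above composes them. A condensed sketch of that calling pattern (error handling trimmed; channel_args, addr, interested_parties, closure, deadline and ep are assumed to be in scope as in the impl above):

grpc_resolved_address mapped_addr;
grpc_fd* fdobj = nullptr;
grpc_error* error =
    grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr, &fdobj);
if (error != GRPC_ERROR_NONE) {
  GRPC_CLOSURE_SCHED(closure, error); /* report the failure to the caller */
} else {
  /* Ownership of fdobj passes here; *ep is set before closure runs on
     success. */
  grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fdobj,
                                          channel_args, &mapped_addr, deadline,
                                          ep);
}
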
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 5521a0a..97aa923 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -103,6 +103,7 @@
       GPR_ASSERT(transfered_bytes == 0);
       if (!wsa_success) {
         error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
+        closesocket(socket->socket);
       } else {
         *ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name);
         socket = NULL;
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 8ca6a3c..7fb068f 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -192,7 +192,7 @@
     }
 
     gpr_cv_wait(&g_cv_wait, &g_mu,
-                grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME));
+                grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
 
     if (grpc_timer_check_trace.enabled()) {
       gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
@@ -317,7 +317,7 @@
       gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
     }
     while (g_thread_count > 0) {
-      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
       if (grpc_timer_check_trace.enabled()) {
         gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
       }
diff --git a/src/core/lib/support/sync_posix.cc b/src/core/lib/support/sync_posix.cc
index dfdd233..c3f6b10 100644
--- a/src/core/lib/support/sync_posix.cc
+++ b/src/core/lib/support/sync_posix.cc
@@ -66,7 +66,12 @@
 /*----------------------------------------*/
 
 void gpr_cv_init(gpr_cv* cv) {
-  GPR_ASSERT(pthread_cond_init(cv, nullptr) == 0);
+  pthread_condattr_t attr;
+  GPR_ASSERT(pthread_condattr_init(&attr) == 0);
+#if GPR_LINUX
+  GPR_ASSERT(pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) == 0);
+#endif  // GPR_LINUX
+  GPR_ASSERT(pthread_cond_init(cv, &attr) == 0);
 }
 
 void gpr_cv_destroy(gpr_cv* cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
@@ -78,7 +83,11 @@
     err = pthread_cond_wait(cv, mu);
   } else {
     struct timespec abs_deadline_ts;
+#if GPR_LINUX
+    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_MONOTONIC);
+#else
     abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
+#endif  // GPR_LINUX
     abs_deadline_ts.tv_sec = (time_t)abs_deadline.tv_sec;
     abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
     err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);
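
Note: with condition variables now created against CLOCK_MONOTONIC on Linux, deadlines passed to gpr_cv_wait are built from the monotonic clock as well, which is why the call sites earlier in this change move from GPR_CLOCK_REALTIME to GPR_CLOCK_MONOTONIC. A minimal sketch of the pattern, assuming mu, cv and a done flag are initialized elsewhere:

/* Wait up to three seconds for done to become true, immune to wall-clock
   adjustments. */
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                     gpr_time_from_seconds(3, GPR_TIMESPAN));
gpr_mu_lock(&mu);
while (!done) {
  if (gpr_cv_wait(&cv, &mu, deadline)) {
    break; /* non-zero return means the deadline expired */
  }
}
gpr_mu_unlock(&mu);
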
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 12385b7..aa5808d 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -115,7 +115,7 @@
   }
   w.kicked = false;
   gpr_timespec deadline_ts =
-      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
+      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC);
   while (!npp->shutdown && !w.kicked &&
          !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts))
     ;
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 4f07183..f1d428f 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -1170,7 +1170,7 @@
   gpr_mu_lock(&server->mu_global);
   while (server->starting) {
     gpr_cv_wait(&server->starting_cv, &server->mu_global,
-                gpr_inf_future(GPR_CLOCK_REALTIME));
+                gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
 
   /* stay locked, and gather up some stuff to do */
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index 4ed4144..e29b108 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: math/math.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: math/math.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 3e8eb34..24a7259 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: grpc/health/v1/health.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: grpc/health/v1/health.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/Control.cs b/src/csharp/Grpc.IntegrationTesting/Control.cs
index b15da8b..8e5da7b 100644
--- a/src/csharp/Grpc.IntegrationTesting/Control.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Control.cs
@@ -522,10 +522,16 @@
       }
       switch (other.LoadCase) {
         case LoadOneofCase.ClosedLoop:
-          ClosedLoop = other.ClosedLoop;
+          if (ClosedLoop == null) {
+            ClosedLoop = new global::Grpc.Testing.ClosedLoopParams();
+          }
+          ClosedLoop.MergeFrom(other.ClosedLoop);
           break;
         case LoadOneofCase.Poisson:
-          Poisson = other.Poisson;
+          if (Poisson == null) {
+            Poisson = new global::Grpc.Testing.PoissonParams();
+          }
+          Poisson.MergeFrom(other.Poisson);
           break;
       }
 
@@ -1901,10 +1907,16 @@
       }
       switch (other.ArgtypeCase) {
         case ArgtypeOneofCase.Setup:
-          Setup = other.Setup;
+          if (Setup == null) {
+            Setup = new global::Grpc.Testing.ClientConfig();
+          }
+          Setup.MergeFrom(other.Setup);
           break;
         case ArgtypeOneofCase.Mark:
-          Mark = other.Mark;
+          if (Mark == null) {
+            Mark = new global::Grpc.Testing.Mark();
+          }
+          Mark.MergeFrom(other.Mark);
           break;
       }
 
@@ -2508,10 +2520,16 @@
       }
       switch (other.ArgtypeCase) {
         case ArgtypeOneofCase.Setup:
-          Setup = other.Setup;
+          if (Setup == null) {
+            Setup = new global::Grpc.Testing.ServerConfig();
+          }
+          Setup.MergeFrom(other.Setup);
           break;
         case ArgtypeOneofCase.Mark:
-          Mark = other.Mark;
+          if (Mark == null) {
+            Mark = new global::Grpc.Testing.Mark();
+          }
+          Mark.MergeFrom(other.Mark);
           break;
       }
 
diff --git a/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs b/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs
new file mode 100644
index 0000000..380294e
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs
@@ -0,0 +1,623 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: grpc/core/stats.proto
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Grpc.Core {
+
+  /// <summary>Holder for reflection information generated from grpc/core/stats.proto</summary>
+  public static partial class StatsReflection {
+
+    #region Descriptor
+    /// <summary>File descriptor for grpc/core/stats.proto</summary>
+    public static pbr::FileDescriptor Descriptor {
+      get { return descriptor; }
+    }
+    private static pbr::FileDescriptor descriptor;
+
+    static StatsReflection() {
+      byte[] descriptorData = global::System.Convert.FromBase64String(
+          string.Concat(
+            "ChVncnBjL2NvcmUvc3RhdHMucHJvdG8SCWdycGMuY29yZSImCgZCdWNrZXQS",
+            "DQoFc3RhcnQYASABKAESDQoFY291bnQYAiABKAQiLwoJSGlzdG9ncmFtEiIK",
+            "B2J1Y2tldHMYASADKAsyES5ncnBjLmNvcmUuQnVja2V0IlsKBk1ldHJpYxIM",
+            "CgRuYW1lGAEgASgJEg8KBWNvdW50GAogASgESAASKQoJaGlzdG9ncmFtGAsg",
+            "ASgLMhQuZ3JwYy5jb3JlLkhpc3RvZ3JhbUgAQgcKBXZhbHVlIisKBVN0YXRz",
+            "EiIKB21ldHJpY3MYASADKAsyES5ncnBjLmNvcmUuTWV0cmljYgZwcm90bzM="));
+      descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+          new pbr::FileDescriptor[] { },
+          new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Bucket), global::Grpc.Core.Bucket.Parser, new[]{ "Start", "Count" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Histogram), global::Grpc.Core.Histogram.Parser, new[]{ "Buckets" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Metric), global::Grpc.Core.Metric.Parser, new[]{ "Name", "Count", "Histogram" }, new[]{ "Value" }, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Stats), global::Grpc.Core.Stats.Parser, new[]{ "Metrics" }, null, null, null)
+          }));
+    }
+    #endregion
+
+  }
+  #region Messages
+  public sealed partial class Bucket : pb::IMessage<Bucket> {
+    private static readonly pb::MessageParser<Bucket> _parser = new pb::MessageParser<Bucket>(() => new Bucket());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Bucket> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[0]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket(Bucket other) : this() {
+      start_ = other.start_;
+      count_ = other.count_;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket Clone() {
+      return new Bucket(this);
+    }
+
+    /// <summary>Field number for the "start" field.</summary>
+    public const int StartFieldNumber = 1;
+    private double start_;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public double Start {
+      get { return start_; }
+      set {
+        start_ = value;
+      }
+    }
+
+    /// <summary>Field number for the "count" field.</summary>
+    public const int CountFieldNumber = 2;
+    private ulong count_;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ulong Count {
+      get { return count_; }
+      set {
+        count_ = value;
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Bucket);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Bucket other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if (Start != other.Start) return false;
+      if (Count != other.Count) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      if (Start != 0D) hash ^= Start.GetHashCode();
+      if (Count != 0UL) hash ^= Count.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      if (Start != 0D) {
+        output.WriteRawTag(9);
+        output.WriteDouble(Start);
+      }
+      if (Count != 0UL) {
+        output.WriteRawTag(16);
+        output.WriteUInt64(Count);
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      if (Start != 0D) {
+        size += 1 + 8;
+      }
+      if (Count != 0UL) {
+        size += 1 + pb::CodedOutputStream.ComputeUInt64Size(Count);
+      }
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Bucket other) {
+      if (other == null) {
+        return;
+      }
+      if (other.Start != 0D) {
+        Start = other.Start;
+      }
+      if (other.Count != 0UL) {
+        Count = other.Count;
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 9: {
+            Start = input.ReadDouble();
+            break;
+          }
+          case 16: {
+            Count = input.ReadUInt64();
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Histogram : pb::IMessage<Histogram> {
+    private static readonly pb::MessageParser<Histogram> _parser = new pb::MessageParser<Histogram>(() => new Histogram());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Histogram> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[1]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram(Histogram other) : this() {
+      buckets_ = other.buckets_.Clone();
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram Clone() {
+      return new Histogram(this);
+    }
+
+    /// <summary>Field number for the "buckets" field.</summary>
+    public const int BucketsFieldNumber = 1;
+    private static readonly pb::FieldCodec<global::Grpc.Core.Bucket> _repeated_buckets_codec
+        = pb::FieldCodec.ForMessage(10, global::Grpc.Core.Bucket.Parser);
+    private readonly pbc::RepeatedField<global::Grpc.Core.Bucket> buckets_ = new pbc::RepeatedField<global::Grpc.Core.Bucket>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<global::Grpc.Core.Bucket> Buckets {
+      get { return buckets_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Histogram);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Histogram other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if(!buckets_.Equals(other.buckets_)) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      hash ^= buckets_.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      buckets_.WriteTo(output, _repeated_buckets_codec);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      size += buckets_.CalculateSize(_repeated_buckets_codec);
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Histogram other) {
+      if (other == null) {
+        return;
+      }
+      buckets_.Add(other.buckets_);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            buckets_.AddEntriesFrom(input, _repeated_buckets_codec);
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Metric : pb::IMessage<Metric> {
+    private static readonly pb::MessageParser<Metric> _parser = new pb::MessageParser<Metric>(() => new Metric());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Metric> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[2]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric(Metric other) : this() {
+      name_ = other.name_;
+      switch (other.ValueCase) {
+        case ValueOneofCase.Count:
+          Count = other.Count;
+          break;
+        case ValueOneofCase.Histogram:
+          Histogram = other.Histogram.Clone();
+          break;
+      }
+
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric Clone() {
+      return new Metric(this);
+    }
+
+    /// <summary>Field number for the "name" field.</summary>
+    public const int NameFieldNumber = 1;
+    private string name_ = "";
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public string Name {
+      get { return name_; }
+      set {
+        name_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+      }
+    }
+
+    /// <summary>Field number for the "count" field.</summary>
+    public const int CountFieldNumber = 10;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ulong Count {
+      get { return valueCase_ == ValueOneofCase.Count ? (ulong) value_ : 0UL; }
+      set {
+        value_ = value;
+        valueCase_ = ValueOneofCase.Count;
+      }
+    }
+
+    /// <summary>Field number for the "histogram" field.</summary>
+    public const int HistogramFieldNumber = 11;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Histogram Histogram {
+      get { return valueCase_ == ValueOneofCase.Histogram ? (global::Grpc.Core.Histogram) value_ : null; }
+      set {
+        value_ = value;
+        valueCase_ = value == null ? ValueOneofCase.None : ValueOneofCase.Histogram;
+      }
+    }
+
+    private object value_;
+    /// <summary>Enum of possible cases for the "value" oneof.</summary>
+    public enum ValueOneofCase {
+      None = 0,
+      Count = 10,
+      Histogram = 11,
+    }
+    private ValueOneofCase valueCase_ = ValueOneofCase.None;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ValueOneofCase ValueCase {
+      get { return valueCase_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void ClearValue() {
+      valueCase_ = ValueOneofCase.None;
+      value_ = null;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Metric);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Metric other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if (Name != other.Name) return false;
+      if (Count != other.Count) return false;
+      if (!object.Equals(Histogram, other.Histogram)) return false;
+      if (ValueCase != other.ValueCase) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      if (Name.Length != 0) hash ^= Name.GetHashCode();
+      if (valueCase_ == ValueOneofCase.Count) hash ^= Count.GetHashCode();
+      if (valueCase_ == ValueOneofCase.Histogram) hash ^= Histogram.GetHashCode();
+      hash ^= (int) valueCase_;
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      if (Name.Length != 0) {
+        output.WriteRawTag(10);
+        output.WriteString(Name);
+      }
+      if (valueCase_ == ValueOneofCase.Count) {
+        output.WriteRawTag(80);
+        output.WriteUInt64(Count);
+      }
+      if (valueCase_ == ValueOneofCase.Histogram) {
+        output.WriteRawTag(90);
+        output.WriteMessage(Histogram);
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      if (Name.Length != 0) {
+        size += 1 + pb::CodedOutputStream.ComputeStringSize(Name);
+      }
+      if (valueCase_ == ValueOneofCase.Count) {
+        size += 1 + pb::CodedOutputStream.ComputeUInt64Size(Count);
+      }
+      if (valueCase_ == ValueOneofCase.Histogram) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(Histogram);
+      }
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Metric other) {
+      if (other == null) {
+        return;
+      }
+      if (other.Name.Length != 0) {
+        Name = other.Name;
+      }
+      switch (other.ValueCase) {
+        case ValueOneofCase.Count:
+          Count = other.Count;
+          break;
+        case ValueOneofCase.Histogram:
+          if (Histogram == null) {
+            Histogram = new global::Grpc.Core.Histogram();
+          }
+          Histogram.MergeFrom(other.Histogram);
+          break;
+      }
+
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            Name = input.ReadString();
+            break;
+          }
+          case 80: {
+            Count = input.ReadUInt64();
+            break;
+          }
+          case 90: {
+            global::Grpc.Core.Histogram subBuilder = new global::Grpc.Core.Histogram();
+            if (valueCase_ == ValueOneofCase.Histogram) {
+              subBuilder.MergeFrom(Histogram);
+            }
+            input.ReadMessage(subBuilder);
+            Histogram = subBuilder;
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Stats : pb::IMessage<Stats> {
+    private static readonly pb::MessageParser<Stats> _parser = new pb::MessageParser<Stats>(() => new Stats());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Stats> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[3]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats(Stats other) : this() {
+      metrics_ = other.metrics_.Clone();
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats Clone() {
+      return new Stats(this);
+    }
+
+    /// <summary>Field number for the "metrics" field.</summary>
+    public const int MetricsFieldNumber = 1;
+    private static readonly pb::FieldCodec<global::Grpc.Core.Metric> _repeated_metrics_codec
+        = pb::FieldCodec.ForMessage(10, global::Grpc.Core.Metric.Parser);
+    private readonly pbc::RepeatedField<global::Grpc.Core.Metric> metrics_ = new pbc::RepeatedField<global::Grpc.Core.Metric>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<global::Grpc.Core.Metric> Metrics {
+      get { return metrics_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Stats);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Stats other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if(!metrics_.Equals(other.metrics_)) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      hash ^= metrics_.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      metrics_.WriteTo(output, _repeated_metrics_codec);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      size += metrics_.CalculateSize(_repeated_metrics_codec);
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Stats other) {
+      if (other == null) {
+        return;
+      }
+      metrics_.Add(other.metrics_);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            metrics_.AddEntriesFrom(input, _repeated_metrics_codec);
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  #endregion
+
+}
+
+#endregion Designer generated code
diff --git a/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
index b2fe73a..9581ade 100644
--- a/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
+++ b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
@@ -26,7 +26,7 @@
             "DGdycGMudGVzdGluZyIyCglEZWJ1Z0luZm8SFQoNc3RhY2tfZW50cmllcxgB",
             "IAMoCRIOCgZkZXRhaWwYAiABKAkiUAoLRXJyb3JTdGF0dXMSDAoEY29kZRgB",
             "IAEoBRIVCg1lcnJvcl9tZXNzYWdlGAIgASgJEhwKFGJpbmFyeV9lcnJvcl9k",
-            "ZXRhaWxzGAMgASgJIskDCg1SZXF1ZXN0UGFyYW1zEhUKDWVjaG9fZGVhZGxp",
+            "ZXRhaWxzGAMgASgJIuIDCg1SZXF1ZXN0UGFyYW1zEhUKDWVjaG9fZGVhZGxp",
             "bmUYASABKAgSHgoWY2xpZW50X2NhbmNlbF9hZnRlcl91cxgCIAEoBRIeChZz",
             "ZXJ2ZXJfY2FuY2VsX2FmdGVyX3VzGAMgASgFEhUKDWVjaG9fbWV0YWRhdGEY",
             "BCABKAgSGgoSY2hlY2tfYXV0aF9jb250ZXh0GAUgASgIEh8KF3Jlc3BvbnNl",
@@ -36,18 +36,19 @@
             "X3R5cGUYCiABKAkSKwoKZGVidWdfaW5mbxgLIAEoCzIXLmdycGMudGVzdGlu",
             "Zy5EZWJ1Z0luZm8SEgoKc2VydmVyX2RpZRgMIAEoCBIcChRiaW5hcnlfZXJy",
             "b3JfZGV0YWlscxgNIAEoCRIxCg5leHBlY3RlZF9lcnJvchgOIAEoCzIZLmdy",
-            "cGMudGVzdGluZy5FcnJvclN0YXR1cyJKCgtFY2hvUmVxdWVzdBIPCgdtZXNz",
-            "YWdlGAEgASgJEioKBXBhcmFtGAIgASgLMhsuZ3JwYy50ZXN0aW5nLlJlcXVl",
-            "c3RQYXJhbXMiRgoOUmVzcG9uc2VQYXJhbXMSGAoQcmVxdWVzdF9kZWFkbGlu",
-            "ZRgBIAEoAxIMCgRob3N0GAIgASgJEgwKBHBlZXIYAyABKAkiTAoMRWNob1Jl",
-            "c3BvbnNlEg8KB21lc3NhZ2UYASABKAkSKwoFcGFyYW0YAiABKAsyHC5ncnBj",
-            "LnRlc3RpbmcuUmVzcG9uc2VQYXJhbXNiBnByb3RvMw=="));
+            "cGMudGVzdGluZy5FcnJvclN0YXR1cxIXCg9zZXJ2ZXJfc2xlZXBfdXMYDyAB",
+            "KAUiSgoLRWNob1JlcXVlc3QSDwoHbWVzc2FnZRgBIAEoCRIqCgVwYXJhbRgC",
+            "IAEoCzIbLmdycGMudGVzdGluZy5SZXF1ZXN0UGFyYW1zIkYKDlJlc3BvbnNl",
+            "UGFyYW1zEhgKEHJlcXVlc3RfZGVhZGxpbmUYASABKAMSDAoEaG9zdBgCIAEo",
+            "CRIMCgRwZWVyGAMgASgJIkwKDEVjaG9SZXNwb25zZRIPCgdtZXNzYWdlGAEg",
+            "ASgJEisKBXBhcmFtGAIgASgLMhwuZ3JwYy50ZXN0aW5nLlJlc3BvbnNlUGFy",
+            "YW1zYgZwcm90bzM="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
           new pbr::FileDescriptor[] { },
           new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.DebugInfo), global::Grpc.Testing.DebugInfo.Parser, new[]{ "StackEntries", "Detail" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ErrorStatus), global::Grpc.Testing.ErrorStatus.Parser, new[]{ "Code", "ErrorMessage", "BinaryErrorDetails" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestParams), global::Grpc.Testing.RequestParams.Parser, new[]{ "EchoDeadline", "ClientCancelAfterUs", "ServerCancelAfterUs", "EchoMetadata", "CheckAuthContext", "ResponseMessageLength", "EchoPeer", "ExpectedClientIdentity", "SkipCancelledCheck", "ExpectedTransportSecurityType", "DebugInfo", "ServerDie", "BinaryErrorDetails", "ExpectedError" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestParams), global::Grpc.Testing.RequestParams.Parser, new[]{ "EchoDeadline", "ClientCancelAfterUs", "ServerCancelAfterUs", "EchoMetadata", "CheckAuthContext", "ResponseMessageLength", "EchoPeer", "ExpectedClientIdentity", "SkipCancelledCheck", "ExpectedTransportSecurityType", "DebugInfo", "ServerDie", "BinaryErrorDetails", "ExpectedError", "ServerSleepUs" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoRequest), global::Grpc.Testing.EchoRequest.Parser, new[]{ "Message", "Param" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ResponseParams), global::Grpc.Testing.ResponseParams.Parser, new[]{ "RequestDeadline", "Host", "Peer" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoResponse), global::Grpc.Testing.EchoResponse.Parser, new[]{ "Message", "Param" }, null, null, null)
@@ -411,6 +412,7 @@
       serverDie_ = other.serverDie_;
       binaryErrorDetails_ = other.binaryErrorDetails_;
       ExpectedError = other.expectedError_ != null ? other.ExpectedError.Clone() : null;
+      serverSleepUs_ = other.serverSleepUs_;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -578,6 +580,20 @@
       }
     }
 
+    /// <summary>Field number for the "server_sleep_us" field.</summary>
+    public const int ServerSleepUsFieldNumber = 15;
+    private int serverSleepUs_;
+    /// <summary>
+    /// Amount to sleep when invoking server
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int ServerSleepUs {
+      get { return serverSleepUs_; }
+      set {
+        serverSleepUs_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as RequestParams);
@@ -605,6 +621,7 @@
       if (ServerDie != other.ServerDie) return false;
       if (BinaryErrorDetails != other.BinaryErrorDetails) return false;
       if (!object.Equals(ExpectedError, other.ExpectedError)) return false;
+      if (ServerSleepUs != other.ServerSleepUs) return false;
       return true;
     }
 
@@ -625,6 +642,7 @@
       if (ServerDie != false) hash ^= ServerDie.GetHashCode();
       if (BinaryErrorDetails.Length != 0) hash ^= BinaryErrorDetails.GetHashCode();
       if (expectedError_ != null) hash ^= ExpectedError.GetHashCode();
+      if (ServerSleepUs != 0) hash ^= ServerSleepUs.GetHashCode();
       return hash;
     }
 
@@ -691,6 +709,10 @@
         output.WriteRawTag(114);
         output.WriteMessage(ExpectedError);
       }
+      if (ServerSleepUs != 0) {
+        output.WriteRawTag(120);
+        output.WriteInt32(ServerSleepUs);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -738,6 +760,9 @@
       if (expectedError_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(ExpectedError);
       }
+      if (ServerSleepUs != 0) {
+        size += 1 + pb::CodedOutputStream.ComputeInt32Size(ServerSleepUs);
+      }
       return size;
     }
 
@@ -794,6 +819,9 @@
         }
         ExpectedError.MergeFrom(other.ExpectedError);
       }
+      if (other.ServerSleepUs != 0) {
+        ServerSleepUs = other.ServerSleepUs;
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -866,6 +894,10 @@
             input.ReadMessage(expectedError_);
             break;
           }
+          case 120: {
+            ServerSleepUs = input.ReadInt32();
+            break;
+          }
         }
       }
     }
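
For context on the generated serializer above: the raw tag bytes it writes follow the protobuf wire format, tag = (field_number << 3) | wire_type. A minimal Python sketch (illustrative only, not part of the generated C# code):

```python
# Protobuf wire-format tag calculation, as used by the generated code above.
VARINT = 0             # wire type for int32 fields such as server_sleep_us
LENGTH_DELIMITED = 2   # wire type for message fields such as expected_error

def raw_tag(field_number, wire_type):
    return (field_number << 3) | wire_type

assert raw_tag(14, LENGTH_DELIMITED) == 114  # WriteRawTag(114) for ExpectedError
assert raw_tag(15, VARINT) == 120            # WriteRawTag(120) for ServerSleepUs
```
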
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 9a664f3..f71d6d1 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/metrics.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/metrics.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015-2016 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/Payloads.cs b/src/csharp/Grpc.IntegrationTesting/Payloads.cs
index f918b95..fca8cda 100644
--- a/src/csharp/Grpc.IntegrationTesting/Payloads.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Payloads.cs
@@ -596,13 +596,22 @@
       }
       switch (other.PayloadCase) {
         case PayloadOneofCase.BytebufParams:
-          BytebufParams = other.BytebufParams;
+          if (BytebufParams == null) {
+            BytebufParams = new global::Grpc.Testing.ByteBufferParams();
+          }
+          BytebufParams.MergeFrom(other.BytebufParams);
           break;
         case PayloadOneofCase.SimpleParams:
-          SimpleParams = other.SimpleParams;
+          if (SimpleParams == null) {
+            SimpleParams = new global::Grpc.Testing.SimpleProtoParams();
+          }
+          SimpleParams.MergeFrom(other.SimpleParams);
           break;
         case PayloadOneofCase.ComplexParams:
-          ComplexParams = other.ComplexParams;
+          if (ComplexParams == null) {
+            ComplexParams = new global::Grpc.Testing.ComplexProtoParams();
+          }
+          ComplexParams.MergeFrom(other.ComplexParams);
           break;
       }
 
diff --git a/src/csharp/Grpc.IntegrationTesting/Services.cs b/src/csharp/Grpc.IntegrationTesting/Services.cs
index 7a0845d..4b76170 100644
--- a/src/csharp/Grpc.IntegrationTesting/Services.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Services.cs
@@ -24,28 +24,27 @@
           string.Concat(
             "CiVzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3NlcnZpY2VzLnByb3RvEgxncnBj",
             "LnRlc3RpbmcaJXNyYy9wcm90by9ncnBjL3Rlc3RpbmcvbWVzc2FnZXMucHJv",
-            "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bxoic3Jj",
-            "L3Byb3RvL2dycGMvdGVzdGluZy9zdGF0cy5wcm90bzKmAwoQQmVuY2htYXJr",
-            "U2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVx",
-            "dWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJOCg1TdHJlYW1p",
-            "bmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRl",
-            "c3RpbmcuU2ltcGxlUmVzcG9uc2UoATABElIKE1N0cmVhbWluZ0Zyb21DbGll",
-            "bnQSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGlu",
-            "Zy5TaW1wbGVSZXNwb25zZSgBElIKE1N0cmVhbWluZ0Zyb21TZXJ2ZXISGy5n",
-            "cnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1w",
-            "bGVSZXNwb25zZTABElIKEVN0cmVhbWluZ0JvdGhXYXlzEhsuZ3JwYy50ZXN0",
-            "aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9u",
-            "c2UoATABMpcCCg1Xb3JrZXJTZXJ2aWNlEkUKCVJ1blNlcnZlchIYLmdycGMu",
-            "dGVzdGluZy5TZXJ2ZXJBcmdzGhouZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXR1",
-            "cygBMAESRQoJUnVuQ2xpZW50EhguZ3JwYy50ZXN0aW5nLkNsaWVudEFyZ3Ma",
-            "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHVzKAEwARJCCglDb3JlQ291bnQS",
-            "GS5ncnBjLnRlc3RpbmcuQ29yZVJlcXVlc3QaGi5ncnBjLnRlc3RpbmcuQ29y",
-            "ZVJlc3BvbnNlEjQKClF1aXRXb3JrZXISEi5ncnBjLnRlc3RpbmcuVm9pZBoS",
-            "LmdycGMudGVzdGluZy5Wb2lkMl4KGFJlcG9ydFFwc1NjZW5hcmlvU2Vydmlj",
-            "ZRJCCg5SZXBvcnRTY2VuYXJpbxIcLmdycGMudGVzdGluZy5TY2VuYXJpb1Jl",
-            "c3VsdBoSLmdycGMudGVzdGluZy5Wb2lkYgZwcm90bzM="));
+            "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bzKmAwoQ",
+            "QmVuY2htYXJrU2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3Rpbmcu",
+            "U2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJO",
+            "Cg1TdHJlYW1pbmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3Qa",
+            "HC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9uc2UoATABElIKE1N0cmVhbWlu",
+            "Z0Zyb21DbGllbnQSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdy",
+            "cGMudGVzdGluZy5TaW1wbGVSZXNwb25zZSgBElIKE1N0cmVhbWluZ0Zyb21T",
+            "ZXJ2ZXISGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVz",
+            "dGluZy5TaW1wbGVSZXNwb25zZTABElIKEVN0cmVhbWluZ0JvdGhXYXlzEhsu",
+            "Z3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRlc3RpbmcuU2lt",
+            "cGxlUmVzcG9uc2UoATABMpcCCg1Xb3JrZXJTZXJ2aWNlEkUKCVJ1blNlcnZl",
+            "chIYLmdycGMudGVzdGluZy5TZXJ2ZXJBcmdzGhouZ3JwYy50ZXN0aW5nLlNl",
+            "cnZlclN0YXR1cygBMAESRQoJUnVuQ2xpZW50EhguZ3JwYy50ZXN0aW5nLkNs",
+            "aWVudEFyZ3MaGi5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHVzKAEwARJCCglD",
+            "b3JlQ291bnQSGS5ncnBjLnRlc3RpbmcuQ29yZVJlcXVlc3QaGi5ncnBjLnRl",
+            "c3RpbmcuQ29yZVJlc3BvbnNlEjQKClF1aXRXb3JrZXISEi5ncnBjLnRlc3Rp",
+            "bmcuVm9pZBoSLmdycGMudGVzdGluZy5Wb2lkMl4KGFJlcG9ydFFwc1NjZW5h",
+            "cmlvU2VydmljZRJCCg5SZXBvcnRTY2VuYXJpbxIcLmdycGMudGVzdGluZy5T",
+            "Y2VuYXJpb1Jlc3VsdBoSLmdycGMudGVzdGluZy5Wb2lkYgZwcm90bzM="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
-          new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
+          new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, },
           new pbr::GeneratedClrTypeInfo(null, null));
     }
     #endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index bfae4ee..d2e4f2e 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/services.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/services.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/Stats.cs b/src/csharp/Grpc.IntegrationTesting/Stats.cs
index 23b56df..e082ae7 100644
--- a/src/csharp/Grpc.IntegrationTesting/Stats.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Stats.cs
@@ -23,28 +23,30 @@
       byte[] descriptorData = global::System.Convert.FromBase64String(
           string.Concat(
             "CiJzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3N0YXRzLnByb3RvEgxncnBjLnRl",
-            "c3RpbmcikQEKC1NlcnZlclN0YXRzEhQKDHRpbWVfZWxhcHNlZBgBIAEoARIR",
-            "Cgl0aW1lX3VzZXIYAiABKAESEwoLdGltZV9zeXN0ZW0YAyABKAESFgoOdG90",
-            "YWxfY3B1X3RpbWUYBCABKAQSFQoNaWRsZV9jcHVfdGltZRgFIAEoBBIVCg1j",
-            "cV9wb2xsX2NvdW50GAYgASgEIjsKD0hpc3RvZ3JhbVBhcmFtcxISCgpyZXNv",
-            "bHV0aW9uGAEgASgBEhQKDG1heF9wb3NzaWJsZRgCIAEoASJ3Cg1IaXN0b2dy",
-            "YW1EYXRhEg4KBmJ1Y2tldBgBIAMoDRIQCghtaW5fc2VlbhgCIAEoARIQCght",
-            "YXhfc2VlbhgDIAEoARILCgNzdW0YBCABKAESFgoOc3VtX29mX3NxdWFyZXMY",
-            "BSABKAESDQoFY291bnQYBiABKAEiOAoSUmVxdWVzdFJlc3VsdENvdW50EhMK",
-            "C3N0YXR1c19jb2RlGAEgASgFEg0KBWNvdW50GAIgASgDIs0BCgtDbGllbnRT",
-            "dGF0cxIuCglsYXRlbmNpZXMYASABKAsyGy5ncnBjLnRlc3RpbmcuSGlzdG9n",
-            "cmFtRGF0YRIUCgx0aW1lX2VsYXBzZWQYAiABKAESEQoJdGltZV91c2VyGAMg",
-            "ASgBEhMKC3RpbWVfc3lzdGVtGAQgASgBEjkKD3JlcXVlc3RfcmVzdWx0cxgF",
-            "IAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQSFQoNY3Ff",
-            "cG9sbF9jb3VudBgGIAEoBGIGcHJvdG8z"));
+            "c3RpbmcaH3NyYy9wcm90by9ncnBjL2NvcmUvc3RhdHMucHJvdG8itwEKC1Nl",
+            "cnZlclN0YXRzEhQKDHRpbWVfZWxhcHNlZBgBIAEoARIRCgl0aW1lX3VzZXIY",
+            "AiABKAESEwoLdGltZV9zeXN0ZW0YAyABKAESFgoOdG90YWxfY3B1X3RpbWUY",
+            "BCABKAQSFQoNaWRsZV9jcHVfdGltZRgFIAEoBBIVCg1jcV9wb2xsX2NvdW50",
+            "GAYgASgEEiQKCmNvcmVfc3RhdHMYByABKAsyEC5ncnBjLmNvcmUuU3RhdHMi",
+            "OwoPSGlzdG9ncmFtUGFyYW1zEhIKCnJlc29sdXRpb24YASABKAESFAoMbWF4",
+            "X3Bvc3NpYmxlGAIgASgBIncKDUhpc3RvZ3JhbURhdGESDgoGYnVja2V0GAEg",
+            "AygNEhAKCG1pbl9zZWVuGAIgASgBEhAKCG1heF9zZWVuGAMgASgBEgsKA3N1",
+            "bRgEIAEoARIWCg5zdW1fb2Zfc3F1YXJlcxgFIAEoARINCgVjb3VudBgGIAEo",
+            "ASI4ChJSZXF1ZXN0UmVzdWx0Q291bnQSEwoLc3RhdHVzX2NvZGUYASABKAUS",
+            "DQoFY291bnQYAiABKAMi8wEKC0NsaWVudFN0YXRzEi4KCWxhdGVuY2llcxgB",
+            "IAEoCzIbLmdycGMudGVzdGluZy5IaXN0b2dyYW1EYXRhEhQKDHRpbWVfZWxh",
+            "cHNlZBgCIAEoARIRCgl0aW1lX3VzZXIYAyABKAESEwoLdGltZV9zeXN0ZW0Y",
+            "BCABKAESOQoPcmVxdWVzdF9yZXN1bHRzGAUgAygLMiAuZ3JwYy50ZXN0aW5n",
+            "LlJlcXVlc3RSZXN1bHRDb3VudBIVCg1jcV9wb2xsX2NvdW50GAYgASgEEiQK",
+            "CmNvcmVfc3RhdHMYByABKAsyEC5ncnBjLmNvcmUuU3RhdHNiBnByb3RvMw=="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
-          new pbr::FileDescriptor[] { },
+          new pbr::FileDescriptor[] { global::Grpc.Core.StatsReflection.Descriptor, },
           new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime", "CqPollCount" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime", "CqPollCount", "CoreStats" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramParams), global::Grpc.Testing.HistogramParams.Parser, new[]{ "Resolution", "MaxPossible" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramData), global::Grpc.Testing.HistogramData.Parser, new[]{ "Bucket", "MinSeen", "MaxSeen", "Sum", "SumOfSquares", "Count" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestResultCount), global::Grpc.Testing.RequestResultCount.Parser, new[]{ "StatusCode", "Count" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults", "CqPollCount" }, null, null, null)
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults", "CqPollCount", "CoreStats" }, null, null, null)
           }));
     }
     #endregion
@@ -81,6 +83,7 @@
       totalCpuTime_ = other.totalCpuTime_;
       idleCpuTime_ = other.idleCpuTime_;
       cqPollCount_ = other.cqPollCount_;
+      CoreStats = other.coreStats_ != null ? other.CoreStats.Clone() : null;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -173,6 +176,20 @@
       }
     }
 
+    /// <summary>Field number for the "core_stats" field.</summary>
+    public const int CoreStatsFieldNumber = 7;
+    private global::Grpc.Core.Stats coreStats_;
+    /// <summary>
+    /// Core library stats
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Stats CoreStats {
+      get { return coreStats_; }
+      set {
+        coreStats_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as ServerStats);
@@ -192,6 +209,7 @@
       if (TotalCpuTime != other.TotalCpuTime) return false;
       if (IdleCpuTime != other.IdleCpuTime) return false;
       if (CqPollCount != other.CqPollCount) return false;
+      if (!object.Equals(CoreStats, other.CoreStats)) return false;
       return true;
     }
 
@@ -204,6 +222,7 @@
       if (TotalCpuTime != 0UL) hash ^= TotalCpuTime.GetHashCode();
       if (IdleCpuTime != 0UL) hash ^= IdleCpuTime.GetHashCode();
       if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
+      if (coreStats_ != null) hash ^= CoreStats.GetHashCode();
       return hash;
     }
 
@@ -238,6 +257,10 @@
         output.WriteRawTag(48);
         output.WriteUInt64(CqPollCount);
       }
+      if (coreStats_ != null) {
+        output.WriteRawTag(58);
+        output.WriteMessage(CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -261,6 +284,9 @@
       if (CqPollCount != 0UL) {
         size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
       }
+      if (coreStats_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(CoreStats);
+      }
       return size;
     }
 
@@ -287,6 +313,12 @@
       if (other.CqPollCount != 0UL) {
         CqPollCount = other.CqPollCount;
       }
+      if (other.coreStats_ != null) {
+        if (coreStats_ == null) {
+          coreStats_ = new global::Grpc.Core.Stats();
+        }
+        CoreStats.MergeFrom(other.CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -321,6 +353,13 @@
             CqPollCount = input.ReadUInt64();
             break;
           }
+          case 58: {
+            if (coreStats_ == null) {
+              coreStats_ = new global::Grpc.Core.Stats();
+            }
+            input.ReadMessage(coreStats_);
+            break;
+          }
         }
       }
     }
@@ -909,6 +948,7 @@
       timeSystem_ = other.timeSystem_;
       requestResults_ = other.requestResults_.Clone();
       cqPollCount_ = other.cqPollCount_;
+      CoreStats = other.coreStats_ != null ? other.CoreStats.Clone() : null;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -993,6 +1033,20 @@
       }
     }
 
+    /// <summary>Field number for the "core_stats" field.</summary>
+    public const int CoreStatsFieldNumber = 7;
+    private global::Grpc.Core.Stats coreStats_;
+    /// <summary>
+    /// Core library stats
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Stats CoreStats {
+      get { return coreStats_; }
+      set {
+        coreStats_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as ClientStats);
@@ -1012,6 +1066,7 @@
       if (TimeSystem != other.TimeSystem) return false;
       if(!requestResults_.Equals(other.requestResults_)) return false;
       if (CqPollCount != other.CqPollCount) return false;
+      if (!object.Equals(CoreStats, other.CoreStats)) return false;
       return true;
     }
 
@@ -1024,6 +1079,7 @@
       if (TimeSystem != 0D) hash ^= TimeSystem.GetHashCode();
       hash ^= requestResults_.GetHashCode();
       if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
+      if (coreStats_ != null) hash ^= CoreStats.GetHashCode();
       return hash;
     }
 
@@ -1055,6 +1111,10 @@
         output.WriteRawTag(48);
         output.WriteUInt64(CqPollCount);
       }
+      if (coreStats_ != null) {
+        output.WriteRawTag(58);
+        output.WriteMessage(CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1076,6 +1136,9 @@
       if (CqPollCount != 0UL) {
         size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
       }
+      if (coreStats_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(CoreStats);
+      }
       return size;
     }
 
@@ -1103,6 +1166,12 @@
       if (other.CqPollCount != 0UL) {
         CqPollCount = other.CqPollCount;
       }
+      if (other.coreStats_ != null) {
+        if (coreStats_ == null) {
+          coreStats_ = new global::Grpc.Core.Stats();
+        }
+        CoreStats.MergeFrom(other.CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1140,6 +1209,13 @@
             CqPollCount = input.ReadUInt64();
             break;
           }
+          case 58: {
+            if (coreStats_ == null) {
+              coreStats_ = new global::Grpc.Core.Stats();
+            }
+            input.ReadMessage(coreStats_);
+            break;
+          }
         }
       }
     }
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index b419dd1..c0d147c 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/test.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/test.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015-2016 gRPC authors.
 //
diff --git a/src/csharp/Grpc.Reflection/Reflection.cs b/src/csharp/Grpc.Reflection/Reflection.cs
index 86e9aac..60090e5 100644
--- a/src/csharp/Grpc.Reflection/Reflection.cs
+++ b/src/csharp/Grpc.Reflection/Reflection.cs
@@ -345,7 +345,10 @@
           FileContainingSymbol = other.FileContainingSymbol;
           break;
         case MessageRequestOneofCase.FileContainingExtension:
-          FileContainingExtension = other.FileContainingExtension;
+          if (FileContainingExtension == null) {
+            FileContainingExtension = new global::Grpc.Reflection.V1Alpha.ExtensionRequest();
+          }
+          FileContainingExtension.MergeFrom(other.FileContainingExtension);
           break;
         case MessageRequestOneofCase.AllExtensionNumbersOfType:
           AllExtensionNumbersOfType = other.AllExtensionNumbersOfType;
@@ -816,16 +819,28 @@
       }
       switch (other.MessageResponseCase) {
         case MessageResponseOneofCase.FileDescriptorResponse:
-          FileDescriptorResponse = other.FileDescriptorResponse;
+          if (FileDescriptorResponse == null) {
+            FileDescriptorResponse = new global::Grpc.Reflection.V1Alpha.FileDescriptorResponse();
+          }
+          FileDescriptorResponse.MergeFrom(other.FileDescriptorResponse);
           break;
         case MessageResponseOneofCase.AllExtensionNumbersResponse:
-          AllExtensionNumbersResponse = other.AllExtensionNumbersResponse;
+          if (AllExtensionNumbersResponse == null) {
+            AllExtensionNumbersResponse = new global::Grpc.Reflection.V1Alpha.ExtensionNumberResponse();
+          }
+          AllExtensionNumbersResponse.MergeFrom(other.AllExtensionNumbersResponse);
           break;
         case MessageResponseOneofCase.ListServicesResponse:
-          ListServicesResponse = other.ListServicesResponse;
+          if (ListServicesResponse == null) {
+            ListServicesResponse = new global::Grpc.Reflection.V1Alpha.ListServiceResponse();
+          }
+          ListServicesResponse.MergeFrom(other.ListServicesResponse);
           break;
         case MessageResponseOneofCase.ErrorResponse:
-          ErrorResponse = other.ErrorResponse;
+          if (ErrorResponse == null) {
+            ErrorResponse = new global::Grpc.Reflection.V1Alpha.ErrorResponse();
+          }
+          ErrorResponse.MergeFrom(other.ErrorResponse);
           break;
       }
 
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index 5843957..0195186 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: grpc/reflection/v1alpha/reflection.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: grpc/reflection/v1alpha/reflection.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2016 gRPC authors.
 //
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index 5647725..8b913ac 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -342,6 +342,170 @@
         raise NotImplementedError()
 
 
+##############  Invocation-Side Interceptor Interfaces & Classes  ##############
+
+
+class ClientCallDetails(six.with_metaclass(abc.ABCMeta)):
+    """Describes an RPC to be invoked.
+
+    This is an EXPERIMENTAL API.
+
+    Attributes:
+      method: The method name of the RPC.
+      timeout: An optional duration of time in seconds to allow for the RPC.
+      metadata: Optional :term:`metadata` to be transmitted to
+        the service-side of the RPC.
+      credentials: An optional CallCredentials for the RPC.
+    """
+
+
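
ClientCallDetails only declares attributes, so an interceptor that needs to rewrite call details typically constructs its own concrete implementation. A hedged sketch of one common pattern (the class name is illustrative):

```python
import collections

import grpc


class _ClientCallDetails(
        collections.namedtuple('_ClientCallDetails',
                               ('method', 'timeout', 'metadata', 'credentials')),
        grpc.ClientCallDetails):
    """Concrete ClientCallDetails an interceptor can build and pass onward."""
```

An interceptor can then, for example, swap in new metadata by building a fresh _ClientCallDetails with the remaining fields copied from the incoming one.
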
+class UnaryUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+    """Affords intercepting unary-unary invocations.
+
+    This is an EXPERIMENTAL API.
+    """
+
+    @abc.abstractmethod
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        """Intercepts a unary-unary invocation asynchronously.
+
+        Args:
+          continuation: A function that proceeds with the invocation by
+            executing the next interceptor in the chain or invoking the
+            actual RPC on the underlying Channel. It is the interceptor's
+            responsibility to call it if it decides to move the RPC forward.
+            The interceptor can use
+            `response_future = continuation(client_call_details, request)`
+            to continue with the RPC. `continuation` returns an object that is
+            both a Call for the RPC and a Future. In the event of RPC
+            completion, the returned Call-Future's result value will be
+            the response message of the RPC. Should the event terminate
+            with non-OK status, the returned Call-Future's exception value
+            will be an RpcError.
+          client_call_details: A ClientCallDetails object describing the
+            outgoing RPC.
+          request: The request value for the RPC.
+
+        Returns:
+            An object that is both a Call for the RPC and a Future.
+            In the event of RPC completion, the returned Call-Future's
+            result value will be the response message of the RPC.
+            Should the event terminate with non-OK status, the returned
+            Call-Future's exception value will be an RpcError.
+        """
+        raise NotImplementedError()
+
+
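
A minimal sketch of a conforming interceptor; the class and its logging behavior are illustrative, not part of this change:

```python
import logging

import grpc


class LoggingUnaryUnaryInterceptor(grpc.UnaryUnaryClientInterceptor):
    """Logs each unary-unary RPC and forwards it unchanged."""

    def intercept_unary_unary(self, continuation, client_call_details, request):
        logging.info('Invoking %s', client_call_details.method)
        # The interceptor is responsible for calling the continuation to move
        # the RPC forward; the returned object is both a Call and a Future.
        return continuation(client_call_details, request)
```
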
+class UnaryStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+    """Affords intercepting unary-stream invocations.
+
+    This is an EXPERIMENTAL API.
+    """
+
+    @abc.abstractmethod
+    def intercept_unary_stream(self, continuation, client_call_details,
+                               request):
+        """Intercepts a unary-stream invocation.
+
+        Args:
+          continuation: A function that proceeds with the invocation by
+            executing the next interceptor in the chain or invoking the
+            actual RPC on the underlying Channel. It is the interceptor's
+            responsibility to call it if it decides to move the RPC forward.
+            The interceptor can use
+            `response_iterator = continuation(client_call_details, request)`
+            to continue with the RPC. `continuation` returns an object that is
+            both a Call for the RPC and an iterator for response values.
+            Drawing response values from the returned Call-iterator may
+            raise RpcError indicating termination of the RPC with non-OK
+            status.
+          client_call_details: A ClientCallDetails object describing the
+            outgoing RPC.
+          request: The request value for the RPC.
+
+        Returns:
+            An object that is both a Call for the RPC and an iterator of
+            response values. Drawing response values from the returned
+            Call-iterator may raise RpcError indicating termination of
+            the RPC with non-OK status.
+        """
+        raise NotImplementedError()
+
+
+class StreamUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+    """Affords intercepting stream-unary invocations.
+
+    This is an EXPERIMENTAL API.
+    """
+
+    @abc.abstractmethod
+    def intercept_stream_unary(self, continuation, client_call_details,
+                               request_iterator):
+        """Intercepts a stream-unary invocation asynchronously.
+
+        Args:
+          continuation: A function that proceeds with the invocation by
+            executing the next interceptor in the chain or invoking the
+            actual RPC on the underlying Channel. It is the interceptor's
+            responsibility to call it if it decides to move the RPC forward.
+            The interceptor can use
+            `response_future = continuation(client_call_details,
+                                            request_iterator)`
+            to continue with the RPC. `continuation` returns an object that is
+            both a Call for the RPC and a Future. In the event of RPC completion,
+            the returned Call-Future's result value will be the response message
+            of the RPC. Should the event terminate with non-OK status, the
+            returned Call-Future's exception value will be an RpcError.
+          client_call_details: A ClientCallDetails object describing the
+            outgoing RPC.
+          request_iterator: An iterator that yields request values for the RPC.
+
+        Returns:
+            An object that is both a Call for the RPC and a Future.
+            In the event of RPC completion, the returned Call-Future's
+            result value will be the response message of the RPC.
+            Should the event terminate with non-OK status, the returned
+            Call-Future's exception value will be an RpcError.
+        """
+        raise NotImplementedError()
+
+
+class StreamStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+    """Affords intercepting stream-stream invocations.
+
+    This is an EXPERIMENTAL API.
+    """
+
+    @abc.abstractmethod
+    def intercept_stream_stream(self, continuation, client_call_details,
+                                request_iterator):
+        """Intercepts a stream-stream invocation.
+
+        Args:
+          continuation: A function that proceeds with the invocation by
+            executing the next interceptor in the chain or invoking the
+            actual RPC on the underlying Channel. It is the interceptor's
+            responsibility to call it if it decides to move the RPC forward.
+            The interceptor can use
+            `response_iterator = continuation(client_call_details,
+                                              request_iterator)`
+            to continue with the RPC. `continuation` returns an object that is
+            both a Call for the RPC and an iterator for response values.
+            Drawing response values from the returned Call-iterator may
+            raise RpcError indicating termination of the RPC with non-OK
+            status.
+          client_call_details: A ClientCallDetails object describing the
+            outgoing RPC.
+          request_iterator: An iterator that yields request values for the RPC.
+
+        Returns:
+            An object that is both a Call for the RPC and an iterator of
+            response values. Drawing response values from the returned
+            Call-iterator may raise RpcError indicating termination of
+            the RPC with non-OK status.
+        """
+        raise NotImplementedError()
+
+
 ############  Authentication & Authorization Interfaces & Classes  #############
 
 
@@ -835,27 +999,47 @@
         raise NotImplementedError()
 
     @abc.abstractmethod
+    def abort(self, code, details):
+        """Raises an exception to terminate the RPC with a non-OK status.
+
+        The code and details passed as arguments will supersede any existing
+        ones.
+
+        Args:
+          code: A StatusCode object to be sent to the client.
+            It must not be StatusCode.OK.
+          details: An ASCII-encodable string to be sent to the client upon
+            termination of the RPC.
+
+        Raises:
+          Exception: An exception is always raised to signal the abortion of
+            the RPC to the gRPC runtime.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
     def set_code(self, code):
         """Sets the value to be used as status code upon RPC completion.
 
-    This method need not be called by method implementations if they wish the
-    gRPC runtime to determine the status code of the RPC.
+        This method need not be called by method implementations if they wish
+        the gRPC runtime to determine the status code of the RPC.
 
-    Args:
-      code: A StatusCode object to be sent to the client.
-    """
+        Args:
+          code: A StatusCode object to be sent to the client.
+        """
         raise NotImplementedError()
 
     @abc.abstractmethod
     def set_details(self, details):
         """Sets the value to be used as detail string upon RPC completion.
 
-    This method need not be called by method implementations if they have no
-    details to transmit.
+        This method need not be called by method implementations if they have
+        no details to transmit.
 
-    Args:
-      details: An arbitrary string to be sent to the client upon completion.
-    """
+        Args:
+          details: An ASCII-encodable string to be sent to the client upon
+            termination of the RPC.
+        """
         raise NotImplementedError()
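
A hedged sketch of how a handler might use the new abort() API; the servicer, method, and echo_pb2 module are hypothetical placeholders for generated code:

```python
import grpc


class EchoServicer(object):  # placeholder for a generated servicer base class
    def UnaryEcho(self, request, context):
        if not request.message:
            # abort() raises, so execution never reaches the return statement;
            # the RPC terminates with the given non-OK code and details.
            context.abort(grpc.StatusCode.INVALID_ARGUMENT,
                          'message must be non-empty')
        return echo_pb2.EchoResponse(message=request.message)  # hypothetical pb2 type
```
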
 
 
@@ -942,6 +1126,34 @@
         raise NotImplementedError()
 
 
+####################  Service-Side Interceptor Interfaces  #####################
+
+
+class ServerInterceptor(six.with_metaclass(abc.ABCMeta)):
+    """Affords intercepting incoming RPCs on the service-side.
+
+    This is an EXPERIMENTAL API.
+    """
+
+    @abc.abstractmethod
+    def intercept_service(self, continuation, handler_call_details):
+        """Intercepts incoming RPCs before handing them over to a handler.
+
+        Args:
+          continuation: A function that takes a HandlerCallDetails and
+            proceeds to invoke the next interceptor in the chain, if any,
+            or the RPC handler lookup logic, with the call details passed
+            as an argument, and returns an RpcMethodHandler instance if
+            the RPC is considered serviced, or None otherwise.
+          handler_call_details: A HandlerCallDetails describing the RPC.
+
+        Returns:
+          An RpcMethodHandler with which the RPC may be serviced if the
+          interceptor chooses to service this RPC, or None otherwise.
+        """
+        raise NotImplementedError()
+
+
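
A minimal sketch of a conforming server interceptor (name and behavior are illustrative):

```python
import grpc


class RequestLoggingInterceptor(grpc.ServerInterceptor):
    """Logs every incoming RPC, then defers to the rest of the chain."""

    def intercept_service(self, continuation, handler_call_details):
        print('Received RPC for', handler_call_details.method)
        # Returning continuation(...) hands the RPC to the next interceptor or
        # to the normal handler lookup; returning None would leave the RPC
        # unserviced.
        return continuation(handler_call_details)
```
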
 #############################  Server Interface  ###############################
 
 
@@ -1356,53 +1568,88 @@
                             credentials._credentials)
 
 
+def intercept_channel(channel, *interceptors):
+    """Intercepts a channel through a set of interceptors.
+
+    This is an EXPERIMENTAL API.
+
+    Args:
+      channel: A Channel.
+      interceptors: Zero or more objects of type
+        UnaryUnaryClientInterceptor,
+        UnaryStreamClientInterceptor,
+        StreamUnaryClientInterceptor, or
+        StreamStreamClientInterceptor.
+        Interceptors are given control in the order they are listed.
+
+    Returns:
+      A Channel that intercepts each invocation via the provided interceptors.
+
+    Raises:
+      TypeError: If interceptor does not derive from any of
+        UnaryUnaryClientInterceptor,
+        UnaryStreamClientInterceptor,
+        StreamUnaryClientInterceptor, or
+        StreamStreamClientInterceptor.
+    """
+    from grpc import _interceptor  # pylint: disable=cyclic-import
+    return _interceptor.intercept_channel(channel, *interceptors)
+
+
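
Usage sketch for the new helper, reusing the illustrative client interceptor from the earlier sketch; host and port are placeholders:

```python
import grpc

channel = grpc.insecure_channel('localhost:50051')
# Interceptors are given control in the order they are listed.
intercepted_channel = grpc.intercept_channel(channel,
                                             LoggingUnaryUnaryInterceptor())
# Stubs built on intercepted_channel route every invocation through the chain.
```
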
 def server(thread_pool,
            handlers=None,
+           interceptors=None,
            options=None,
            maximum_concurrent_rpcs=None):
     """Creates a Server with which RPCs can be serviced.
 
-  Args:
-    thread_pool: A futures.ThreadPoolExecutor to be used by the Server
-      to execute RPC handlers.
-    handlers: An optional list of GenericRpcHandlers used for executing RPCs.
-      More handlers may be added by calling add_generic_rpc_handlers any time
-      before the server is started.
-    options: An optional list of key-value pairs (channel args in gRPC runtime)
-    to configure the channel.
-    maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
-      will service before returning RESOURCE_EXHAUSTED status, or None to
-      indicate no limit.
+    Args:
+      thread_pool: A futures.ThreadPoolExecutor to be used by the Server
+        to execute RPC handlers.
+      handlers: An optional list of GenericRpcHandlers used for executing RPCs.
+        More handlers may be added by calling add_generic_rpc_handlers any time
+        before the server is started.
+      interceptors: An optional list of ServerInterceptor objects that observe
+        and optionally manipulate the incoming RPCs before handing them over to
+        handlers. The interceptors are given control in the order they are
+        specified. This is an EXPERIMENTAL API.
+      options: An optional list of key-value pairs (channel args in gRPC runtime)
+        to configure the channel.
+      maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
+        will service before returning RESOURCE_EXHAUSTED status, or None to
+        indicate no limit.
 
-  Returns:
-    A Server object.
-  """
+    Returns:
+      A Server object.
+    """
     from grpc import _server  # pylint: disable=cyclic-import
     return _server.Server(thread_pool, () if handlers is None else handlers, ()
-                          if options is None else options,
-                          maximum_concurrent_rpcs)
+                          if interceptors is None else interceptors, () if
+                          options is None else options, maximum_concurrent_rpcs)
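
Usage sketch for the new interceptors= argument, reusing the illustrative server interceptor from the earlier sketch; the port is a placeholder:

```python
from concurrent import futures

import grpc

server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=10),
    interceptors=(RequestLoggingInterceptor(),))  # EXPERIMENTAL argument
server.add_insecure_port('[::]:50051')
server.start()
```
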
 
 
 ###################################  __all__  #################################
 
-__all__ = ('FutureTimeoutError', 'FutureCancelledError', 'Future',
-           'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext',
-           'Call', 'ChannelCredentials', 'CallCredentials',
-           'AuthMetadataContext', 'AuthMetadataPluginCallback',
-           'AuthMetadataPlugin', 'ServerCertificateConfiguration',
-           'ServerCredentials', 'UnaryUnaryMultiCallable',
-           'UnaryStreamMultiCallable', 'StreamUnaryMultiCallable',
-           'StreamStreamMultiCallable', 'Channel', 'ServicerContext',
-           'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler',
-           'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler',
-           'unary_stream_rpc_method_handler', 'stream_unary_rpc_method_handler',
-           'stream_stream_rpc_method_handler',
-           'method_handlers_generic_handler', 'ssl_channel_credentials',
-           'metadata_call_credentials', 'access_token_call_credentials',
-           'composite_call_credentials', 'composite_channel_credentials',
-           'ssl_server_credentials', 'ssl_server_certificate_configuration',
-           'dynamic_ssl_server_credentials', 'channel_ready_future',
-           'insecure_channel', 'secure_channel', 'server',)
+__all__ = (
+    'FutureTimeoutError', 'FutureCancelledError', 'Future',
+    'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext', 'Call',
+    'ChannelCredentials', 'CallCredentials', 'AuthMetadataContext',
+    'AuthMetadataPluginCallback', 'AuthMetadataPlugin', 'ClientCallDetails',
+    'ServerCertificateConfiguration', 'ServerCredentials',
+    'UnaryUnaryMultiCallable', 'UnaryStreamMultiCallable',
+    'StreamUnaryMultiCallable', 'StreamStreamMultiCallable',
+    'UnaryUnaryClientInterceptor', 'UnaryStreamClientInterceptor',
+    'StreamUnaryClientInterceptor', 'StreamStreamClientInterceptor', 'Channel',
+    'ServicerContext', 'RpcMethodHandler', 'HandlerCallDetails',
+    'GenericRpcHandler', 'ServiceRpcHandler', 'Server', 'ServerInterceptor',
+    'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler',
+    'stream_unary_rpc_method_handler', 'stream_stream_rpc_method_handler',
+    'method_handlers_generic_handler', 'ssl_channel_credentials',
+    'metadata_call_credentials', 'access_token_call_credentials',
+    'composite_call_credentials', 'composite_channel_credentials',
+    'ssl_server_credentials', 'ssl_server_certificate_configuration',
+    'dynamic_ssl_server_credentials', 'channel_ready_future',
+    'insecure_channel', 'secure_channel', 'intercept_channel', 'server',)
 
 ############################### Extension Shims ################################
 
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index cf4ce09..d7456a3 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -122,8 +122,8 @@
         state.code = code
         state.details = details
         if state.initial_metadata is None:
-            state.initial_metadata = _common.EMPTY_METADATA
-        state.trailing_metadata = _common.EMPTY_METADATA
+            state.initial_metadata = ()
+        state.trailing_metadata = ()
 
 
 def _handle_event(event, state, response_deserializer):
@@ -202,8 +202,7 @@
                     else:
                         operations = (cygrpc.operation_send_message(
                             serialized_request, _EMPTY_FLAGS),)
-                        call.start_client_batch(
-                            cygrpc.Operations(operations), event_handler)
+                        call.start_client_batch(operations, event_handler)
                         state.due.add(cygrpc.OperationType.send_message)
                         while True:
                             state.condition.wait()
@@ -218,8 +217,7 @@
             if state.code is None:
                 operations = (
                     cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
-                call.start_client_batch(
-                    cygrpc.Operations(operations), event_handler)
+                call.start_client_batch(operations, event_handler)
                 state.due.add(cygrpc.OperationType.send_close_from_client)
 
     def stop_consumption_thread(timeout):  # pylint: disable=unused-argument
@@ -321,8 +319,7 @@
                 event_handler = _event_handler(self._state, self._call,
                                                self._response_deserializer)
                 self._call.start_client_batch(
-                    cygrpc.Operations(
-                        (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+                    (cygrpc.operation_receive_message(_EMPTY_FLAGS),),
                     event_handler)
                 self._state.due.add(cygrpc.OperationType.receive_message)
             elif self._state.code is grpc.StatusCode.OK:
@@ -372,14 +369,13 @@
         with self._state.condition:
             while self._state.initial_metadata is None:
                 self._state.condition.wait()
-            return _common.to_application_metadata(self._state.initial_metadata)
+            return self._state.initial_metadata
 
     def trailing_metadata(self):
         with self._state.condition:
             while self._state.trailing_metadata is None:
                 self._state.condition.wait()
-            return _common.to_application_metadata(
-                self._state.trailing_metadata)
+            return self._state.trailing_metadata
 
     def code(self):
         with self._state.condition:
@@ -420,8 +416,7 @@
     deadline, deadline_timespec = _deadline(timeout)
     serialized_request = _common.serialize(request, request_serializer)
     if serialized_request is None:
-        state = _RPCState((), _common.EMPTY_METADATA, _common.EMPTY_METADATA,
-                          grpc.StatusCode.INTERNAL,
+        state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                           'Exception serializing request!')
         rendezvous = _Rendezvous(state, None, None, deadline)
         return deadline, deadline_timespec, None, rendezvous
@@ -458,8 +453,7 @@
         else:
             state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
             operations = (
-                cygrpc.operation_send_initial_metadata(
-                    _common.to_cygrpc_metadata(metadata), _EMPTY_FLAGS),
+                cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
                 cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
                 cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                 cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
@@ -479,8 +473,7 @@
                                              deadline_timespec)
             if credentials is not None:
                 call.set_credentials(credentials._credentials)
-            call_error = call.start_client_batch(
-                cygrpc.Operations(operations), None)
+            call_error = call.start_client_batch(operations, None)
             _check_call_error(call_error, metadata)
             _handle_event(completion_queue.poll(), state,
                           self._response_deserializer)
@@ -509,8 +502,7 @@
             event_handler = _event_handler(state, call,
                                            self._response_deserializer)
             with state.condition:
-                call_error = call.start_client_batch(
-                    cygrpc.Operations(operations), event_handler)
+                call_error = call.start_client_batch(operations, event_handler)
                 if call_error != cygrpc.CallError.ok:
                     _call_error_set_RPCstate(state, call_error, metadata)
                     return _Rendezvous(state, None, None, deadline)
@@ -544,18 +536,15 @@
                                            self._response_deserializer)
             with state.condition:
                 call.start_client_batch(
-                    cygrpc.Operations((
-                        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
-                    )), event_handler)
+                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
+                    event_handler)
                 operations = (
                     cygrpc.operation_send_initial_metadata(
-                        _common.to_cygrpc_metadata(metadata),
-                        _EMPTY_FLAGS), cygrpc.operation_send_message(
+                        metadata, _EMPTY_FLAGS), cygrpc.operation_send_message(
                             serialized_request, _EMPTY_FLAGS),
                     cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                     cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
-                call_error = call.start_client_batch(
-                    cygrpc.Operations(operations), event_handler)
+                call_error = call.start_client_batch(operations, event_handler)
                 if call_error != cygrpc.CallError.ok:
                     _call_error_set_RPCstate(state, call_error, metadata)
                     return _Rendezvous(state, None, None, deadline)
@@ -584,16 +573,13 @@
             call.set_credentials(credentials._credentials)
         with state.condition:
             call.start_client_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
                 None)
             operations = (
-                cygrpc.operation_send_initial_metadata(
-                    _common.to_cygrpc_metadata(metadata), _EMPTY_FLAGS),
+                cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
                 cygrpc.operation_receive_message(_EMPTY_FLAGS),
                 cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
-            call_error = call.start_client_batch(
-                cygrpc.Operations(operations), None)
+            call_error = call.start_client_batch(operations, None)
             _check_call_error(call_error, metadata)
             _consume_request_iterator(request_iterator, state, call,
                                       self._request_serializer)
@@ -638,16 +624,13 @@
         event_handler = _event_handler(state, call, self._response_deserializer)
         with state.condition:
             call.start_client_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
                 event_handler)
             operations = (
-                cygrpc.operation_send_initial_metadata(
-                    _common.to_cygrpc_metadata(metadata), _EMPTY_FLAGS),
+                cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
                 cygrpc.operation_receive_message(_EMPTY_FLAGS),
                 cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
-            call_error = call.start_client_batch(
-                cygrpc.Operations(operations), event_handler)
+            call_error = call.start_client_batch(operations, event_handler)
             if call_error != cygrpc.CallError.ok:
                 _call_error_set_RPCstate(state, call_error, metadata)
                 return _Rendezvous(state, None, None, deadline)
@@ -681,15 +664,12 @@
         event_handler = _event_handler(state, call, self._response_deserializer)
         with state.condition:
             call.start_client_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
                 event_handler)
             operations = (
-                cygrpc.operation_send_initial_metadata(
-                    _common.to_cygrpc_metadata(metadata), _EMPTY_FLAGS),
+                cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
                 cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
-            call_error = call.start_client_batch(
-                cygrpc.Operations(operations), event_handler)
+            call_error = call.start_client_batch(operations, event_handler)
             if call_error != cygrpc.CallError.ok:
                 _call_error_set_RPCstate(state, call_error, metadata)
                 return _Rendezvous(state, None, None, deadline)
diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py
index 740d463..130fc42 100644
--- a/src/python/grpcio/grpc/_common.py
+++ b/src/python/grpcio/grpc/_common.py
@@ -22,8 +22,6 @@
 import grpc
 from grpc._cython import cygrpc
 
-EMPTY_METADATA = cygrpc.Metadata(())
-
 CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
     cygrpc.ConnectivityState.idle:
     grpc.ChannelConnectivity.IDLE,
@@ -91,21 +89,6 @@
     return cygrpc.ChannelArgs(cygrpc_args)
 
 
-def to_cygrpc_metadata(application_metadata):
-    return EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
-        cygrpc.Metadatum(encode(key), encode(value))
-        for key, value in application_metadata)
-
-
-def to_application_metadata(cygrpc_metadata):
-    if cygrpc_metadata is None:
-        return ()
-    else:
-        return tuple((decode(key), value
-                      if key[-4:] == b'-bin' else decode(value))
-                     for key, value in cygrpc_metadata)
-
-
 def _transform(message, transformer, exception_message):
     if transformer is None:
         return message
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
index 6b3a276..6361669 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -26,20 +26,16 @@
   def _start_batch(self, operations, tag, retain_self):
     if not self.is_valid:
       raise ValueError("invalid call object cannot be used from Python")
-    cdef grpc_call_error result
-    cdef Operations cy_operations = Operations(operations)
-    cdef OperationTag operation_tag = OperationTag(tag)
+    cdef OperationTag operation_tag = OperationTag(tag, operations)
     if retain_self:
       operation_tag.operation_call = self
     else:
       operation_tag.operation_call = None
-    operation_tag.batch_operations = cy_operations
+    operation_tag.store_ops()
     cpython.Py_INCREF(operation_tag)
-    with nogil:
-      result = grpc_call_start_batch(
-          self.c_call, cy_operations.c_ops, cy_operations.c_nops,
+    return grpc_call_start_batch(
+          self.c_call, operation_tag.c_ops, operation_tag.c_nops,
           <cpython.PyObject *>operation_tag, NULL)
-    return result
 
   def start_client_batch(self, operations, tag):
     # We don't reference this call in the operations tag because
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index 4c397f8..644df67 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -76,7 +76,7 @@
   def watch_connectivity_state(
       self, grpc_connectivity_state last_observed_state,
       Timespec deadline not None, CompletionQueue queue not None, tag):
-    cdef OperationTag operation_tag = OperationTag(tag)
+    cdef OperationTag operation_tag = OperationTag(tag, None)
     cpython.Py_INCREF(operation_tag)
     with nogil:
       grpc_channel_watch_connectivity_state(
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 237f430..140fc35 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -42,7 +42,7 @@
     cdef Call operation_call = None
     cdef CallDetails request_call_details = None
     cdef object request_metadata = None
-    cdef Operations batch_operations = None
+    cdef object batch_operations = None
     if event.type == GRPC_QUEUE_TIMEOUT:
       return Event(
           event.type, False, None, None, None, None, False, None)
@@ -61,9 +61,10 @@
         user_tag = tag.user_tag
         operation_call = tag.operation_call
         request_call_details = tag.request_call_details
-        if tag.request_metadata is not None:
-          request_metadata = tuple(tag.request_metadata)
-        batch_operations = tag.batch_operations
+        if tag.is_new_request:
+          request_metadata = _metadata(&tag._c_request_metadata)
+          grpc_metadata_array_destroy(&tag._c_request_metadata)
+        batch_operations = tag.release_ops()
         if tag.is_new_request:
           # Stuff in the tag not explicitly handled by us needs to live through
           # the life of the call
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 246a271..500086f 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -30,9 +30,13 @@
     grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
     size_t *num_creds_md, grpc_status_code *status,
     const char **error_details) with gil:
-  def callback(Metadata metadata, grpc_status_code status, bytes error_details):
+  cdef size_t metadata_count
+  cdef grpc_metadata *c_metadata
+  def callback(metadata, grpc_status_code status, bytes error_details):
     if status is StatusCode.ok:
-      cb(user_data, metadata.c_metadata, metadata.c_count, status, NULL)
+      _store_c_metadata(metadata, &c_metadata, &metadata_count)
+      cb(user_data, c_metadata, metadata_count, status, NULL)
+      _release_c_metadata(c_metadata, metadata_count)
     else:
       cb(user_data, NULL, 0, status, error_details)
   args = context.service_url, context.method_name, callback,
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
index c8f11f8..53e06a1 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 
 # This function will ascii encode unicode string inputs if necessary.
 # In Python3, unicode strings are the default str type.
@@ -22,3 +24,30 @@
     return s.encode('ascii')
   else:
     raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s)))
+
+
+# TODO(https://github.com/grpc/grpc/issues/13782): It would be nice for us if
+# the type of metadata that we accept were exactly the same as the type of
+# metadata that we deliver to our users (so "str" for this function's
+# parameter rather than "object"), but would it be nice for our users? Right
+# now we haven't yet heard from enough users to know one way or another.
+cdef bytes _encode(object string_or_none):
+  if string_or_none is None:
+    return b''
+  elif isinstance(string_or_none, (bytes,)):
+    return <bytes>string_or_none
+  elif isinstance(string_or_none, (unicode,)):
+    return string_or_none.encode('ascii')
+  else:
+    raise TypeError('Expected str, not {}'.format(type(string_or_none)))
+
+
+cdef str _decode(bytes bytestring):
+    if isinstance(bytestring, (str,)):
+        return <str>bytestring
+    else:
+        try:
+            return bytestring.decode('utf8')
+        except UnicodeDecodeError:
+            logging.exception('Invalid encoding on %s', bytestring)
+            return bytestring.decode('latin1')
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi
new file mode 100644
index 0000000..a18c365
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi
@@ -0,0 +1,26 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef void _store_c_metadata(
+    metadata, grpc_metadata **c_metadata, size_t *c_count)
+
+
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count)
+
+
+cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice)
+
+
+cdef tuple _metadata(grpc_metadata_array *c_metadata_array)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi
new file mode 100644
index 0000000..c39fef0
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi
@@ -0,0 +1,62 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+
+_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
+
+
+cdef void _store_c_metadata(
+    metadata, grpc_metadata **c_metadata, size_t *c_count):
+  if metadata is None:
+    c_count[0] = 0
+    c_metadata[0] = NULL
+  else:
+    metadatum_count = len(metadata)
+    if metadatum_count == 0:
+      c_count[0] = 0
+      c_metadata[0] = NULL
+    else:
+      c_count[0] = metadatum_count
+      c_metadata[0] = <grpc_metadata *>gpr_malloc(
+          metadatum_count * sizeof(grpc_metadata))
+      for index, (key, value) in enumerate(metadata):
+        encoded_key = _encode(key)
+        encoded_value = value if encoded_key[-4:] == b'-bin' else _encode(value)
+        c_metadata[0][index].key = _slice_from_bytes(encoded_key)
+        c_metadata[0][index].value = _slice_from_bytes(encoded_value)
+
+
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count):
+  if 0 < count:
+    for index in range(count):
+      grpc_slice_unref(c_metadata[index].key)
+      grpc_slice_unref(c_metadata[index].value)
+    gpr_free(c_metadata)
+
+
+cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice):
+  cdef bytes key = _slice_bytes(key_slice)
+  cdef bytes value = _slice_bytes(value_slice)
+  return <tuple>_Metadatum(
+      _decode(key), value if key[-4:] == b'-bin' else _decode(value))
+
+
+cdef tuple _metadata(grpc_metadata_array *c_metadata_array):
+  return tuple(
+      _metadatum(
+          c_metadata_array.metadata[index].key,
+          c_metadata_array.metadata[index].value)
+      for index in range(c_metadata_array.count))
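Taken together, these helpers define the shape of metadata at the cygrpc boundary: an iterable of (key, value) pairs in which values under keys ending in '-bin' stay as bytes while everything else is text. A small, hedged illustration of metadata that conforms to that convention (plain Python, no cygrpc needed; the key names are illustrative):

    metadata = (
        ('initial-metadata-key', 'a text value'),   # text value, ASCII-encoded on the way into core
        ('binary-metadata-bin', b'\x00\x01\x02'),   # '-bin' suffix: value is passed through as bytes
    )

    for key, value in metadata:
        assert isinstance(value, bytes) == key.endswith('-bin')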
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 9c40ebf..594fdb1 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -37,10 +37,15 @@
   cdef Server shutting_down_server
   cdef Call operation_call
   cdef CallDetails request_call_details
-  cdef MetadataArray request_metadata
-  cdef Operations batch_operations
+  cdef grpc_metadata_array _c_request_metadata
+  cdef grpc_op *c_ops
+  cdef size_t c_nops
+  cdef readonly object _operations
   cdef bint is_new_request
 
+  cdef void store_ops(self)
+  cdef object release_ops(self)
+
 
 cdef class Event:
 
@@ -57,7 +62,7 @@
   cdef readonly Call operation_call
 
   # For Call.start_batch
-  cdef readonly Operations batch_operations
+  cdef readonly object batch_operations
 
 
 cdef class ByteBuffer:
@@ -84,28 +89,15 @@
   cdef list args
 
 
-cdef class Metadatum:
-
-  cdef grpc_metadata c_metadata
-  cdef void _copy_metadatum(self, grpc_metadata *destination) nogil
-
-
-cdef class Metadata:
-
-  cdef grpc_metadata *c_metadata
-  cdef readonly size_t c_count
-
-
-cdef class MetadataArray:
-
-  cdef grpc_metadata_array c_metadata_array
-
-
 cdef class Operation:
 
   cdef grpc_op c_op
+  cdef bint _c_metadata_needs_release
+  cdef size_t _c_metadata_count
+  cdef grpc_metadata *_c_metadata
   cdef ByteBuffer _received_message
-  cdef MetadataArray _received_metadata
+  cdef bint _c_metadata_array_needs_destruction
+  cdef grpc_metadata_array _c_metadata_array
   cdef grpc_status_code _received_status_code
   cdef grpc_slice _status_details
   cdef int _received_cancelled
@@ -113,13 +105,6 @@
   cdef object references
 
 
-cdef class Operations:
-
-  cdef grpc_op *c_ops
-  cdef size_t c_nops
-  cdef list operations
-
-
 cdef class CompressionOptions:
 
   cdef grpc_compression_options c_options
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 03fb226..26eaf50 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -220,9 +220,26 @@
 
 cdef class OperationTag:
 
-  def __cinit__(self, user_tag):
+  def __cinit__(self, user_tag, operations):
     self.user_tag = user_tag
     self.references = []
+    self._operations = operations
+
+  cdef void store_ops(self):
+    self.c_nops = 0 if self._operations is None else len(self._operations)
+    if 0 < self.c_nops:
+      self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
+      for index in range(self.c_nops):
+        self.c_ops[index] = (<Operation>(self._operations[index])).c_op
+
+  cdef object release_ops(self):
+    if 0 < self.c_nops:
+      for index, operation in enumerate(self._operations):
+        (<Operation>operation).c_op = self.c_ops[index]
+      gpr_free(self.c_ops)
+      return self._operations
+    else:
+      return ()
 
 
 cdef class Event:
@@ -232,7 +249,7 @@
                 CallDetails request_call_details,
                 object request_metadata,
                 bint is_new_request,
-                Operations batch_operations):
+                object batch_operations):
     self.type = type
     self.success = success
     self.tag = tag
@@ -390,140 +407,13 @@
     return self.args[i]
 
 
-cdef class Metadatum:
-
-  def __cinit__(self, bytes key, bytes value):
-    self.c_metadata.key = _slice_from_bytes(key)
-    self.c_metadata.value = _slice_from_bytes(value)
-
-  cdef void _copy_metadatum(self, grpc_metadata *destination) nogil:
-    destination[0].key = _copy_slice(self.c_metadata.key)
-    destination[0].value = _copy_slice(self.c_metadata.value)
-
-  @property
-  def key(self):
-    return _slice_bytes(self.c_metadata.key)
-
-  @property
-  def value(self):
-    return _slice_bytes(self.c_metadata.value)
-
-  def __len__(self):
-    return 2
-
-  def __getitem__(self, size_t i):
-    if i == 0:
-      return self.key
-    elif i == 1:
-      return self.value
-    else:
-      raise IndexError("index must be 0 (key) or 1 (value)")
-
-  def __iter__(self):
-    return iter((self.key, self.value))
-
-  def __dealloc__(self):
-    grpc_slice_unref(self.c_metadata.key)
-    grpc_slice_unref(self.c_metadata.value)
-
-cdef class _MetadataIterator:
-
-  cdef size_t i
-  cdef size_t _length
-  cdef object _metadatum_indexable
-
-  def __cinit__(self, length, metadatum_indexable):
-    self._length = length
-    self._metadatum_indexable = metadatum_indexable
-    self.i = 0
-
-  def __iter__(self):
-    return self
-
-  def __next__(self):
-    if self.i < self._length:
-      result = self._metadatum_indexable[self.i]
-      self.i = self.i + 1
-      return result
-    else:
-      raise StopIteration()
-
-
-# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an
-# ordinary sequence of pairs of bytestrings all the way down to the
-# grpc_call_start_batch call.
-cdef class Metadata:
-  """Metadata being passed from application to core."""
-
-  def __cinit__(self, metadata_iterable):
-    metadata_sequence = tuple(metadata_iterable)
-    cdef size_t count = len(metadata_sequence)
-    with nogil:
-      grpc_init()
-      self.c_metadata = <grpc_metadata *>gpr_malloc(
-          count * sizeof(grpc_metadata))
-      self.c_count = count
-    for index, metadatum in enumerate(metadata_sequence):
-      self.c_metadata[index].key = grpc_slice_copy(
-          (<Metadatum>metadatum).c_metadata.key)
-      self.c_metadata[index].value = grpc_slice_copy(
-          (<Metadatum>metadatum).c_metadata.value)
-
-  def __dealloc__(self):
-    with nogil:
-      for index in range(self.c_count):
-        grpc_slice_unref(self.c_metadata[index].key)
-        grpc_slice_unref(self.c_metadata[index].value)
-      gpr_free(self.c_metadata)
-      grpc_shutdown()
-
-  def __len__(self):
-    return self.c_count
-
-  def __getitem__(self, size_t index):
-    if index < self.c_count:
-      key = _slice_bytes(self.c_metadata[index].key)
-      value = _slice_bytes(self.c_metadata[index].value)
-      return Metadatum(key, value)
-    else:
-      raise IndexError()
-
-  def __iter__(self):
-    return _MetadataIterator(self.c_count, self)
-
-
-cdef class MetadataArray:
-  """Metadata being passed from core to application."""
-
-  def __cinit__(self):
-    with nogil:
-      grpc_init()
-      grpc_metadata_array_init(&self.c_metadata_array)
-
-  def __dealloc__(self):
-    with nogil:
-      grpc_metadata_array_destroy(&self.c_metadata_array)
-      grpc_shutdown()
-
-  def __len__(self):
-    return self.c_metadata_array.count
-
-  def __getitem__(self, size_t i):
-    if i >= self.c_metadata_array.count:
-      raise IndexError()
-    key = _slice_bytes(self.c_metadata_array.metadata[i].key)
-    value = _slice_bytes(self.c_metadata_array.metadata[i].value)
-    return Metadatum(key=key, value=value)
-
-  def __iter__(self):
-    return _MetadataIterator(self.c_metadata_array.count, self)
-
-
 cdef class Operation:
 
   def __cinit__(self):
     grpc_init()
     self.references = []
+    self._c_metadata_needs_release = False
+    self._c_metadata_array_needs_destruction = False
     self._status_details = grpc_empty_slice()
     self.is_valid = False
 
@@ -556,13 +446,7 @@
     if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
         self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
       raise TypeError("self must be an operation receiving metadata")
-    # TODO(https://github.com/grpc/grpc/issues/7950): Drop the "all Cython
-    # objects must be legitimate for use from Python at any time" policy in
-    # place today, shift the policy toward "Operation objects are only usable
-    # while their calls are active", and move this making-a-copy-because-this-
-    # data-needs-to-live-much-longer-than-the-call-from-which-it-arose to the
-    # lowest Python layer.
-    return tuple(self._received_metadata)
+    return _metadata(&self._c_metadata_array)
 
   @property
   def received_status_code(self):
@@ -602,16 +486,21 @@
     return False if self._received_cancelled == 0 else True
 
   def __dealloc__(self):
+    if self._c_metadata_needs_release:
+      _release_c_metadata(self._c_metadata, self._c_metadata_count)
+    if self._c_metadata_array_needs_destruction:
+      grpc_metadata_array_destroy(&self._c_metadata_array)
     grpc_slice_unref(self._status_details)
     grpc_shutdown()
 
-def operation_send_initial_metadata(Metadata metadata, int flags):
+def operation_send_initial_metadata(metadata, int flags):
   cdef Operation op = Operation()
   op.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
   op.c_op.flags = flags
-  op.c_op.data.send_initial_metadata.count = metadata.c_count
-  op.c_op.data.send_initial_metadata.metadata = metadata.c_metadata
-  op.references.append(metadata)
+  _store_c_metadata(metadata, &op._c_metadata, &op._c_metadata_count)
+  op._c_metadata_needs_release = True
+  op.c_op.data.send_initial_metadata.count = op._c_metadata_count
+  op.c_op.data.send_initial_metadata.metadata = op._c_metadata
   op.is_valid = True
   return op
 
@@ -633,18 +522,19 @@
   return op
 
 def operation_send_status_from_server(
-    Metadata metadata, grpc_status_code code, bytes details, int flags):
+    metadata, grpc_status_code code, bytes details, int flags):
   cdef Operation op = Operation()
   op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
   op.c_op.flags = flags
+  _store_c_metadata(metadata, &op._c_metadata, &op._c_metadata_count)
+  op._c_metadata_needs_release = True
   op.c_op.data.send_status_from_server.trailing_metadata_count = (
-      metadata.c_count)
-  op.c_op.data.send_status_from_server.trailing_metadata = metadata.c_metadata
+      op._c_metadata_count)
+  op.c_op.data.send_status_from_server.trailing_metadata = op._c_metadata
   op.c_op.data.send_status_from_server.status = code
   grpc_slice_unref(op._status_details)
   op._status_details = _slice_from_bytes(details)
   op.c_op.data.send_status_from_server.status_details = &op._status_details
-  op.references.append(metadata)
   op.is_valid = True
   return op
 
@@ -652,9 +542,10 @@
   cdef Operation op = Operation()
   op.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
   op.c_op.flags = flags
-  op._received_metadata = MetadataArray()
+  grpc_metadata_array_init(&op._c_metadata_array)
   op.c_op.data.receive_initial_metadata.receive_initial_metadata = (
-      &op._received_metadata.c_metadata_array)
+      &op._c_metadata_array)
+  op._c_metadata_array_needs_destruction = True
   op.is_valid = True
   return op
 
@@ -675,9 +566,10 @@
   cdef Operation op = Operation()
   op.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
   op.c_op.flags = flags
-  op._received_metadata = MetadataArray()
+  grpc_metadata_array_init(&op._c_metadata_array)
   op.c_op.data.receive_status_on_client.trailing_metadata = (
-      &op._received_metadata.c_metadata_array)
+      &op._c_metadata_array)
+  op._c_metadata_array_needs_destruction = True
   op.c_op.data.receive_status_on_client.status = (
       &op._received_status_code)
   op.c_op.data.receive_status_on_client.status_details = (
@@ -694,59 +586,6 @@
   return op
 
 
-cdef class _OperationsIterator:
-
-  cdef size_t i
-  cdef Operations operations
-
-  def __cinit__(self, Operations operations not None):
-    self.i = 0
-    self.operations = operations
-
-  def __iter__(self):
-    return self
-
-  def __next__(self):
-    if self.i < len(self.operations):
-      result = self.operations[self.i]
-      self.i = self.i + 1
-      return result
-    else:
-      raise StopIteration()
-
-
-cdef class Operations:
-
-  def __cinit__(self, operations):
-    grpc_init()
-    self.operations = list(operations)  # normalize iterable
-    self.c_ops = NULL
-    self.c_nops = 0
-    for operation in self.operations:
-      if not isinstance(operation, Operation):
-        raise TypeError("expected operations to be iterable of Operation")
-    self.c_nops = len(self.operations)
-    with nogil:
-      self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op)*self.c_nops)
-    for i in range(self.c_nops):
-      self.c_ops[i] = (<Operation>(self.operations[i])).c_op
-
-  def __len__(self):
-    return self.c_nops
-
-  def __getitem__(self, size_t i):
-    # self.operations is never stale; it's only updated from this file
-    return self.operations[i]
-
-  def __dealloc__(self):
-    with nogil:
-      gpr_free(self.c_ops)
-    grpc_shutdown()
-
-  def __iter__(self):
-    return _OperationsIterator(self)
-
-
 cdef class CompressionOptions:
 
   def __cinit__(self):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index 5f34059..f8d7892 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -78,23 +78,19 @@
       raise ValueError("server must be started and not shutting down")
     if server_queue not in self.registered_completion_queues:
       raise ValueError("server_queue must be a registered completion queue")
-    cdef grpc_call_error result
-    cdef OperationTag operation_tag = OperationTag(tag)
+    cdef OperationTag operation_tag = OperationTag(tag, None)
     operation_tag.operation_call = Call()
     operation_tag.request_call_details = CallDetails()
-    operation_tag.request_metadata = MetadataArray()
+    grpc_metadata_array_init(&operation_tag._c_request_metadata)
     operation_tag.references.extend([self, call_queue, server_queue])
     operation_tag.is_new_request = True
-    operation_tag.batch_operations = Operations([])
     cpython.Py_INCREF(operation_tag)
-    with nogil:
-      result = grpc_server_request_call(
-          self.c_server, &operation_tag.operation_call.c_call,
-          &operation_tag.request_call_details.c_details,
-          &operation_tag.request_metadata.c_metadata_array,
-          call_queue.c_completion_queue, server_queue.c_completion_queue,
-          <cpython.PyObject *>operation_tag)
-    return result
+    return grpc_server_request_call(
+        self.c_server, &operation_tag.operation_call.c_call,
+        &operation_tag.request_call_details.c_details,
+        &operation_tag._c_request_metadata,
+        call_queue.c_completion_queue, server_queue.c_completion_queue,
+        <cpython.PyObject *>operation_tag)
 
   def register_completion_queue(
       self, CompletionQueue queue not None):
@@ -135,7 +131,7 @@
 
   cdef _c_shutdown(self, CompletionQueue queue, tag):
     self.is_shutting_down = True
-    operation_tag = OperationTag(tag)
+    operation_tag = OperationTag(tag, None)
     operation_tag.shutting_down_server = self
     cpython.Py_INCREF(operation_tag)
     with nogil:
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pxd b/src/python/grpcio/grpc/_cython/cygrpc.pxd
index fc6cc5f..6fc5638 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pxd
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pxd
@@ -18,6 +18,7 @@
 include "_cygrpc/channel.pxd.pxi"
 include "_cygrpc/credentials.pxd.pxi"
 include "_cygrpc/completion_queue.pxd.pxi"
+include "_cygrpc/metadata.pxd.pxi"
 include "_cygrpc/records.pxd.pxi"
 include "_cygrpc/security.pxd.pxi"
 include "_cygrpc/server.pxd.pxi"
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index 57165d5..d605229 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -25,6 +25,7 @@
 include "_cygrpc/channel.pyx.pxi"
 include "_cygrpc/credentials.pyx.pxi"
 include "_cygrpc/completion_queue.pyx.pxi"
+include "_cygrpc/metadata.pyx.pxi"
 include "_cygrpc/records.pyx.pxi"
 include "_cygrpc/security.pyx.pxi"
 include "_cygrpc/server.pyx.pxi"
diff --git a/src/python/grpcio/grpc/_interceptor.py b/src/python/grpcio/grpc/_interceptor.py
new file mode 100644
index 0000000..fffb269
--- /dev/null
+++ b/src/python/grpcio/grpc/_interceptor.py
@@ -0,0 +1,318 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Implementation of gRPC Python interceptors."""
+
+import collections
+import sys
+
+import grpc
+
+
+class _ServicePipeline(object):
+
+    def __init__(self, interceptors):
+        self.interceptors = tuple(interceptors)
+
+    def _continuation(self, thunk, index):
+        return lambda context: self._intercept_at(thunk, index, context)
+
+    def _intercept_at(self, thunk, index, context):
+        if index < len(self.interceptors):
+            interceptor = self.interceptors[index]
+            thunk = self._continuation(thunk, index + 1)
+            return interceptor.intercept_service(thunk, context)
+        else:
+            return thunk(context)
+
+    def execute(self, thunk, context):
+        return self._intercept_at(thunk, 0, context)
+
+
+def service_pipeline(interceptors):
+    return _ServicePipeline(interceptors) if interceptors else None
+
+
+class _ClientCallDetails(
+        collections.namedtuple('_ClientCallDetails',
+                               ('method', 'timeout', 'metadata',
+                                'credentials')), grpc.ClientCallDetails):
+    pass
+
+
+class _LocalFailure(grpc.RpcError, grpc.Future, grpc.Call):
+
+    def __init__(self, exception, traceback):
+        super(_LocalFailure, self).__init__()
+        self._exception = exception
+        self._traceback = traceback
+
+    def initial_metadata(self):
+        return None
+
+    def trailing_metadata(self):
+        return None
+
+    def code(self):
+        return grpc.StatusCode.INTERNAL
+
+    def details(self):
+        return 'Exception raised while intercepting the RPC'
+
+    def cancel(self):
+        return False
+
+    def cancelled(self):
+        return False
+
+    def running(self):
+        return False
+
+    def done(self):
+        return True
+
+    def result(self, ignored_timeout=None):
+        raise self._exception
+
+    def exception(self, ignored_timeout=None):
+        return self._exception
+
+    def traceback(self, ignored_timeout=None):
+        return self._traceback
+
+    def add_done_callback(self, fn):
+        fn(self)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        raise self._exception
+
+    def __next__(self):
+        raise self._exception
+
+
+class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
+
+    def __init__(self, thunk, method, interceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self, request, timeout=None, metadata=None, credentials=None):
+        call_future = self.future(
+            request,
+            timeout=timeout,
+            metadata=metadata,
+            credentials=credentials)
+        return call_future.result()
+
+    def with_call(self, request, timeout=None, metadata=None, credentials=None):
+        call_future = self.future(
+            request,
+            timeout=timeout,
+            metadata=metadata,
+            credentials=credentials)
+        return call_future.result(), call_future
+
+    def future(self, request, timeout=None, metadata=None, credentials=None):
+
+        def continuation(client_call_details, request):
+            return self._thunk(client_call_details.method).future(
+                request,
+                timeout=client_call_details.timeout,
+                metadata=client_call_details.metadata,
+                credentials=client_call_details.credentials)
+
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials)
+        try:
+            return self._interceptor.intercept_unary_unary(
+                continuation, client_call_details, request)
+        except Exception as exception:  # pylint:disable=broad-except
+            return _LocalFailure(exception, sys.exc_info()[2])
+
+
+class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
+
+    def __init__(self, thunk, method, interceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self, request, timeout=None, metadata=None, credentials=None):
+
+        def continuation(client_call_details, request):
+            return self._thunk(client_call_details.method)(
+                request,
+                timeout=client_call_details.timeout,
+                metadata=client_call_details.metadata,
+                credentials=client_call_details.credentials)
+
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials)
+        try:
+            return self._interceptor.intercept_unary_stream(
+                continuation, client_call_details, request)
+        except Exception as exception:  # pylint:disable=broad-except
+            return _LocalFailure(exception, sys.exc_info()[2])
+
+
+class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
+
+    def __init__(self, thunk, method, interceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request_iterator,
+                 timeout=None,
+                 metadata=None,
+                 credentials=None):
+        call_future = self.future(
+            request_iterator,
+            timeout=timeout,
+            metadata=metadata,
+            credentials=credentials)
+        return call_future.result()
+
+    def with_call(self,
+                  request_iterator,
+                  timeout=None,
+                  metadata=None,
+                  credentials=None):
+        call_future = self.future(
+            request_iterator,
+            timeout=timeout,
+            metadata=metadata,
+            credentials=credentials)
+        return call_future.result(), call_future
+
+    def future(self,
+               request_iterator,
+               timeout=None,
+               metadata=None,
+               credentials=None):
+
+        def continuation(client_call_details, request_iterator):
+            return self._thunk(client_call_details.method).future(
+                request_iterator,
+                timeout=client_call_details.timeout,
+                metadata=client_call_details.metadata,
+                credentials=client_call_details.credentials)
+
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials)
+
+        try:
+            return self._interceptor.intercept_stream_unary(
+                continuation, client_call_details, request_iterator)
+        except Exception as exception:  # pylint:disable=broad-except
+            return _LocalFailure(exception, sys.exc_info()[2])
+
+
+class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
+
+    def __init__(self, thunk, method, interceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request_iterator,
+                 timeout=None,
+                 metadata=None,
+                 credentials=None):
+
+        def continuation(client_call_details, request_iterator):
+            return self._thunk(client_call_details.method)(
+                request_iterator,
+                timeout=client_call_details.timeout,
+                metadata=client_call_details.metadata,
+                credentials=client_call_details.credentials)
+
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials)
+
+        try:
+            return self._interceptor.intercept_stream_stream(
+                continuation, client_call_details, request_iterator)
+        except Exception as exception:  # pylint:disable=broad-except
+            return _LocalFailure(exception, sys.exc_info()[2])
+
+
+class _Channel(grpc.Channel):
+
+    def __init__(self, channel, interceptor):
+        self._channel = channel
+        self._interceptor = interceptor
+
+    def subscribe(self, *args, **kwargs):
+        self._channel.subscribe(*args, **kwargs)
+
+    def unsubscribe(self, *args, **kwargs):
+        self._channel.unsubscribe(*args, **kwargs)
+
+    def unary_unary(self,
+                    method,
+                    request_serializer=None,
+                    response_deserializer=None):
+        thunk = lambda m: self._channel.unary_unary(m, request_serializer, response_deserializer)
+        if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor):
+            return _UnaryUnaryMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def unary_stream(self,
+                     method,
+                     request_serializer=None,
+                     response_deserializer=None):
+        thunk = lambda m: self._channel.unary_stream(m, request_serializer, response_deserializer)
+        if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor):
+            return _UnaryStreamMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def stream_unary(self,
+                     method,
+                     request_serializer=None,
+                     response_deserializer=None):
+        thunk = lambda m: self._channel.stream_unary(m, request_serializer, response_deserializer)
+        if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor):
+            return _StreamUnaryMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def stream_stream(self,
+                      method,
+                      request_serializer=None,
+                      response_deserializer=None):
+        thunk = lambda m: self._channel.stream_stream(m, request_serializer, response_deserializer)
+        if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor):
+            return _StreamStreamMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+
+def intercept_channel(channel, *interceptors):
+    for interceptor in reversed(list(interceptors)):
+        if not isinstance(interceptor, grpc.UnaryUnaryClientInterceptor) and \
+           not isinstance(interceptor, grpc.UnaryStreamClientInterceptor) and \
+           not isinstance(interceptor, grpc.StreamUnaryClientInterceptor) and \
+           not isinstance(interceptor, grpc.StreamStreamClientInterceptor):
+            raise TypeError('interceptor must be '
+                            'grpc.UnaryUnaryClientInterceptor or '
+                            'grpc.UnaryStreamClientInterceptor or '
+                            'grpc.StreamUnaryClientInterceptor or '
+                            'grpc.StreamStreamClientInterceptor')
+        channel = _Channel(channel, interceptor)
+    return channel
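As a usage sketch of the new client-side interception surface (it assumes the grpc.UnaryUnaryClientInterceptor, grpc.ClientCallDetails, and grpc.intercept_channel names exported elsewhere in this change; the header name and target address are illustrative):

    import collections

    import grpc


    class _Details(
            collections.namedtuple('_Details',
                                   ('method', 'timeout', 'metadata',
                                    'credentials')), grpc.ClientCallDetails):
        pass


    class HeaderAdderInterceptor(grpc.UnaryUnaryClientInterceptor):
        """Appends one invocation-metadata entry to every unary-unary RPC."""

        def intercept_unary_unary(self, continuation, client_call_details,
                                  request):
            metadata = list(client_call_details.metadata or ())
            metadata.append(('x-example-header', 'example-value'))
            details = _Details(client_call_details.method,
                               client_call_details.timeout, metadata,
                               client_call_details.credentials)
            return continuation(details, request)


    # channel = grpc.intercept_channel(
    #     grpc.insecure_channel('localhost:50051'), HeaderAdderInterceptor())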
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
index cd17f4a..f728795 100644
--- a/src/python/grpcio/grpc/_plugin_wrapping.py
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -54,9 +54,7 @@
                     'AuthMetadataPluginCallback raised exception "{}"!'.format(
                         self._state.exception))
         if error is None:
-            self._callback(
-                _common.to_cygrpc_metadata(metadata), cygrpc.StatusCode.ok,
-                None)
+            self._callback(metadata, cygrpc.StatusCode.ok, None)
         else:
             self._callback(None, cygrpc.StatusCode.internal,
                            _common.encode(str(error)))
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index 5b4812b..02d3af8 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -23,6 +23,7 @@
 
 import grpc
 from grpc import _common
+from grpc import _interceptor
 from grpc._cython import cygrpc
 from grpc.framework.foundation import callable_util
 
@@ -96,6 +97,7 @@
         self.statused = False
         self.rpc_errors = []
         self.callbacks = []
+        self.abortion = None
 
 
 def _raise_rpc_error(state):
@@ -129,19 +131,17 @@
         effective_details = details if state.details is None else state.details
         if state.initial_metadata_allowed:
             operations = (cygrpc.operation_send_initial_metadata(
-                _common.EMPTY_METADATA,
-                _EMPTY_FLAGS), cygrpc.operation_send_status_from_server(
-                    _common.to_cygrpc_metadata(state.trailing_metadata),
-                    effective_code, effective_details, _EMPTY_FLAGS),)
+                (), _EMPTY_FLAGS), cygrpc.operation_send_status_from_server(
+                    state.trailing_metadata, effective_code, effective_details,
+                    _EMPTY_FLAGS),)
             token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
         else:
             operations = (cygrpc.operation_send_status_from_server(
-                _common.to_cygrpc_metadata(state.trailing_metadata),
-                effective_code, effective_details, _EMPTY_FLAGS),)
+                state.trailing_metadata, effective_code, effective_details,
+                _EMPTY_FLAGS),)
             token = _SEND_STATUS_FROM_SERVER_TOKEN
-        call.start_server_batch(
-            cygrpc.Operations(operations),
-            _send_status_from_server(state, token))
+        call.start_server_batch(operations,
+                                _send_status_from_server(state, token))
         state.statused = True
         state.due.add(token)
 
@@ -237,7 +237,7 @@
             self._state.disable_next_compression = True
 
     def invocation_metadata(self):
-        return _common.to_application_metadata(self._rpc_event.request_metadata)
+        return self._rpc_event.request_metadata
 
     def peer(self):
         return _common.decode(self._rpc_event.operation_call.peer())
@@ -263,11 +263,9 @@
             else:
                 if self._state.initial_metadata_allowed:
                     operation = cygrpc.operation_send_initial_metadata(
-                        _common.to_cygrpc_metadata(initial_metadata),
-                        _EMPTY_FLAGS)
+                        initial_metadata, _EMPTY_FLAGS)
                     self._rpc_event.operation_call.start_server_batch(
-                        cygrpc.Operations((operation,)),
-                        _send_initial_metadata(self._state))
+                        (operation,), _send_initial_metadata(self._state))
                     self._state.initial_metadata_allowed = False
                     self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
                 else:
@@ -275,8 +273,14 @@
 
     def set_trailing_metadata(self, trailing_metadata):
         with self._state.condition:
-            self._state.trailing_metadata = _common.to_cygrpc_metadata(
-                trailing_metadata)
+            self._state.trailing_metadata = trailing_metadata
+
+    def abort(self, code, details):
+        with self._state.condition:
+            self._state.code = code
+            self._state.details = _common.encode(details)
+            self._state.abortion = Exception()
+            raise self._state.abortion
 
     def set_code(self, code):
         with self._state.condition:
@@ -301,8 +305,7 @@
             raise StopIteration()
         else:
             self._call.start_server_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_message(_EMPTY_FLAGS),),
                 _receive_message(self._state, self._call,
                                  self._request_deserializer))
             self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
@@ -345,8 +348,7 @@
                 return None
             else:
                 rpc_event.operation_call.start_server_batch(
-                    cygrpc.Operations(
-                        (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+                    (cygrpc.operation_receive_message(_EMPTY_FLAGS),),
                     _receive_message(state, rpc_event.operation_call,
                                      request_deserializer))
                 state.due.add(_RECEIVE_MESSAGE_TOKEN)
@@ -376,7 +378,10 @@
         return behavior(argument, context), True
     except Exception as exception:  # pylint: disable=broad-except
         with state.condition:
-            if exception not in state.rpc_errors:
+            if exception is state.abortion:
+                _abort(state, rpc_event.operation_call,
+                       cygrpc.StatusCode.unknown, b'RPC Aborted')
+            elif exception not in state.rpc_errors:
                 details = 'Exception calling application: {}'.format(exception)
                 logging.exception(details)
                 _abort(state, rpc_event.operation_call,
@@ -391,7 +396,10 @@
         return None, True
     except Exception as exception:  # pylint: disable=broad-except
         with state.condition:
-            if exception not in state.rpc_errors:
+            if exception is state.abortion:
+                _abort(state, rpc_event.operation_call,
+                       cygrpc.StatusCode.unknown, b'RPC Aborted')
+            elif exception not in state.rpc_errors:
                 details = 'Exception iterating responses: {}'.format(exception)
                 logging.exception(details)
                 _abort(state, rpc_event.operation_call,
@@ -417,9 +425,8 @@
         else:
             if state.initial_metadata_allowed:
                 operations = (cygrpc.operation_send_initial_metadata(
-                    _common.EMPTY_METADATA, _EMPTY_FLAGS),
-                              cygrpc.operation_send_message(serialized_response,
-                                                            _EMPTY_FLAGS),)
+                    (), _EMPTY_FLAGS), cygrpc.operation_send_message(
+                        serialized_response, _EMPTY_FLAGS),)
                 state.initial_metadata_allowed = False
                 token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
             else:
@@ -427,7 +434,7 @@
                                                             _EMPTY_FLAGS),)
                 token = _SEND_MESSAGE_TOKEN
             rpc_event.operation_call.start_server_batch(
-                cygrpc.Operations(operations), _send_message(state, token))
+                operations, _send_message(state, token))
             state.due.add(token)
             while True:
                 state.condition.wait()
@@ -438,24 +445,21 @@
 def _status(rpc_event, state, serialized_response):
     with state.condition:
         if state.client is not _CANCELLED:
-            trailing_metadata = _common.to_cygrpc_metadata(
-                state.trailing_metadata)
             code = _completion_code(state)
             details = _details(state)
             operations = [
                 cygrpc.operation_send_status_from_server(
-                    trailing_metadata, code, details, _EMPTY_FLAGS),
+                    state.trailing_metadata, code, details, _EMPTY_FLAGS),
             ]
             if state.initial_metadata_allowed:
                 operations.append(
-                    cygrpc.operation_send_initial_metadata(
-                        _common.EMPTY_METADATA, _EMPTY_FLAGS))
+                    cygrpc.operation_send_initial_metadata((), _EMPTY_FLAGS))
             if serialized_response is not None:
                 operations.append(
                     cygrpc.operation_send_message(serialized_response,
                                                   _EMPTY_FLAGS))
             rpc_event.operation_call.start_server_batch(
-                cygrpc.Operations(operations),
+                operations,
                 _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
             state.statused = True
             state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
@@ -538,24 +542,31 @@
         method_handler.request_deserializer, method_handler.response_serializer)
 
 
-def _find_method_handler(rpc_event, generic_handlers):
-    for generic_handler in generic_handlers:
-        method_handler = generic_handler.service(
-            _HandlerCallDetails(
-                _common.decode(rpc_event.request_call_details.method),
-                rpc_event.request_metadata))
-        if method_handler is not None:
-            return method_handler
-    else:
+def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
+
+    def query_handlers(handler_call_details):
+        for generic_handler in generic_handlers:
+            method_handler = generic_handler.service(handler_call_details)
+            if method_handler is not None:
+                return method_handler
         return None
 
+    handler_call_details = _HandlerCallDetails(
+        _common.decode(rpc_event.request_call_details.method),
+        rpc_event.request_metadata)
+
+    if interceptor_pipeline is not None:
+        return interceptor_pipeline.execute(query_handlers,
+                                            handler_call_details)
+    else:
+        return query_handlers(handler_call_details)
+
 
 def _reject_rpc(rpc_event, status, details):
-    operations = (cygrpc.operation_send_initial_metadata(_common.EMPTY_METADATA,
-                                                         _EMPTY_FLAGS),
+    operations = (cygrpc.operation_send_initial_metadata((), _EMPTY_FLAGS),
                   cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
-                  cygrpc.operation_send_status_from_server(
-                      _common.EMPTY_METADATA, status, details, _EMPTY_FLAGS),)
+                  cygrpc.operation_send_status_from_server((), status, details,
+                                                           _EMPTY_FLAGS),)
     rpc_state = _RPCState()
     rpc_event.operation_call.start_server_batch(
         operations, lambda ignored_event: (rpc_state, (),))
@@ -566,8 +577,7 @@
     state = _RPCState()
     with state.condition:
         rpc_event.operation_call.start_server_batch(
-            cygrpc.Operations(
-                (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+            (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),),
             _receive_close_on_server(state))
         state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
         if method_handler.request_streaming:
@@ -586,13 +596,14 @@
                                                   method_handler, thread_pool)
 
 
-def _handle_call(rpc_event, generic_handlers, thread_pool,
+def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
                  concurrency_exceeded):
     if not rpc_event.success:
         return None, None
     if rpc_event.request_call_details.method is not None:
         try:
-            method_handler = _find_method_handler(rpc_event, generic_handlers)
+            method_handler = _find_method_handler(rpc_event, generic_handlers,
+                                                  interceptor_pipeline)
         except Exception as exception:  # pylint: disable=broad-except
             details = 'Exception servicing handler: {}'.format(exception)
             logging.exception(details)
@@ -620,12 +631,14 @@
 
 class _ServerState(object):
 
-    def __init__(self, completion_queue, server, generic_handlers, thread_pool,
-                 maximum_concurrent_rpcs):
-        self.lock = threading.Lock()
+    # pylint: disable=too-many-arguments
+    def __init__(self, completion_queue, server, generic_handlers,
+                 interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
+        self.lock = threading.RLock()
         self.completion_queue = completion_queue
         self.server = server
         self.generic_handlers = list(generic_handlers)
+        self.interceptor_pipeline = interceptor_pipeline
         self.thread_pool = thread_pool
         self.stage = _ServerStage.STOPPED
         self.shutdown_events = None
@@ -690,8 +703,8 @@
                     state.maximum_concurrent_rpcs is not None and
                     state.active_rpc_count >= state.maximum_concurrent_rpcs)
                 rpc_state, rpc_future = _handle_call(
-                    event, state.generic_handlers, state.thread_pool,
-                    concurrency_exceeded)
+                    event, state.generic_handlers, state.interceptor_pipeline,
+                    state.thread_pool, concurrency_exceeded)
                 if rpc_state is not None:
                     state.rpc_states.add(rpc_state)
                 if rpc_future is not None:
@@ -734,22 +747,12 @@
             state.shutdown_events.append(shutdown_event)
             if grace is None:
                 state.server.cancel_all_calls()
-                # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
-                for rpc_state in state.rpc_states:
-                    with rpc_state.condition:
-                        rpc_state.client = _CANCELLED
-                        rpc_state.condition.notify_all()
             else:
 
                 def cancel_all_calls_after_grace():
                     shutdown_event.wait(timeout=grace)
                     with state.lock:
                         state.server.cancel_all_calls()
-                        # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
-                        for rpc_state in state.rpc_states:
-                            with rpc_state.condition:
-                                rpc_state.client = _CANCELLED
-                                rpc_state.condition.notify_all()
 
                 thread = threading.Thread(target=cancel_all_calls_after_grace)
                 thread.start()
@@ -779,12 +782,14 @@
 
 class Server(grpc.Server):
 
-    def __init__(self, thread_pool, generic_handlers, options,
+    # pylint: disable=too-many-arguments
+    def __init__(self, thread_pool, generic_handlers, interceptors, options,
                  maximum_concurrent_rpcs):
         completion_queue = cygrpc.CompletionQueue()
         server = cygrpc.Server(_common.channel_args(options))
         server.register_completion_queue(completion_queue)
         self._state = _ServerState(completion_queue, server, generic_handlers,
+                                   _interceptor.service_pipeline(interceptors),
                                    thread_pool, maximum_concurrent_rpcs)
 
     def add_generic_rpc_handlers(self, generic_rpc_handlers):
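With the abortion plumbing added above, a handler can terminate an RPC through the context: abort() records the code and details on the RPC state and raises, and the dispatch loop recognizes that exception and sends the stored status rather than reporting an internal error. A hedged servicer sketch (the service, store, and field names are illustrative only):

    import grpc

    DATABASE = {}  # hypothetical in-memory store


    class FeatureServicer(object):

        def GetFeature(self, request, context):
            feature = DATABASE.get(request.name)  # hypothetical lookup
            if feature is None:
                # abort() raises, so nothing after this line executes.
                context.abort(grpc.StatusCode.NOT_FOUND,
                              'no feature named {!r}'.format(request.name))
            return feature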
diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py
index 73ce22f..dcaa0ee 100644
--- a/src/python/grpcio/grpc/beta/_client_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_client_adaptations.py
@@ -15,6 +15,7 @@
 
 import grpc
 from grpc import _common
+from grpc.beta import _metadata
 from grpc.beta import interfaces
 from grpc.framework.common import cardinality
 from grpc.framework.foundation import future
@@ -157,10 +158,10 @@
         return _InvocationProtocolContext()
 
     def initial_metadata(self):
-        return self._call.initial_metadata()
+        return _metadata.beta(self._call.initial_metadata())
 
     def terminal_metadata(self):
-        return self._call.terminal_metadata()
+        return _metadata.beta(self._call.terminal_metadata())
 
     def code(self):
         return self._call.code()
@@ -182,14 +183,14 @@
             response, call = multi_callable.with_call(
                 request,
                 timeout=timeout,
-                metadata=effective_metadata,
+                metadata=_metadata.unbeta(effective_metadata),
                 credentials=_credentials(protocol_options))
             return response, _Rendezvous(None, None, call)
         else:
             return multi_callable(
                 request,
                 timeout=timeout,
-                metadata=effective_metadata,
+                metadata=_metadata.unbeta(effective_metadata),
                 credentials=_credentials(protocol_options))
     except grpc.RpcError as rpc_error_call:
         raise _abortion_error(rpc_error_call)
@@ -206,7 +207,7 @@
     response_future = multi_callable.future(
         request,
         timeout=timeout,
-        metadata=effective_metadata,
+        metadata=_metadata.unbeta(effective_metadata),
         credentials=_credentials(protocol_options))
     return _Rendezvous(response_future, None, response_future)
 
@@ -222,7 +223,7 @@
     response_iterator = multi_callable(
         request,
         timeout=timeout,
-        metadata=effective_metadata,
+        metadata=_metadata.unbeta(effective_metadata),
         credentials=_credentials(protocol_options))
     return _Rendezvous(None, response_iterator, response_iterator)
 
@@ -241,14 +242,14 @@
             response, call = multi_callable.with_call(
                 request_iterator,
                 timeout=timeout,
-                metadata=effective_metadata,
+                metadata=_metadata.unbeta(effective_metadata),
                 credentials=_credentials(protocol_options))
             return response, _Rendezvous(None, None, call)
         else:
             return multi_callable(
                 request_iterator,
                 timeout=timeout,
-                metadata=effective_metadata,
+                metadata=_metadata.unbeta(effective_metadata),
                 credentials=_credentials(protocol_options))
     except grpc.RpcError as rpc_error_call:
         raise _abortion_error(rpc_error_call)
@@ -265,7 +266,7 @@
     response_future = multi_callable.future(
         request_iterator,
         timeout=timeout,
-        metadata=effective_metadata,
+        metadata=_metadata.unbeta(effective_metadata),
         credentials=_credentials(protocol_options))
     return _Rendezvous(response_future, None, response_future)
 
@@ -281,7 +282,7 @@
     response_iterator = multi_callable(
         request_iterator,
         timeout=timeout,
-        metadata=effective_metadata,
+        metadata=_metadata.unbeta(effective_metadata),
         credentials=_credentials(protocol_options))
     return _Rendezvous(None, response_iterator, response_iterator)
 
diff --git a/src/python/grpcio/grpc/beta/_metadata.py b/src/python/grpcio/grpc/beta/_metadata.py
new file mode 100644
index 0000000..e135f4d
--- /dev/null
+++ b/src/python/grpcio/grpc/beta/_metadata.py
@@ -0,0 +1,49 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""API metadata conversion utilities."""
+
+import collections
+
+_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
+
+
+def _beta_metadatum(key, value):
+    beta_key = key if isinstance(key, (bytes,)) else key.encode('ascii')
+    beta_value = value if isinstance(value, (bytes,)) else value.encode('ascii')
+    return _Metadatum(beta_key, beta_value)
+
+
+def _metadatum(beta_key, beta_value):
+    key = beta_key if isinstance(beta_key, (str,)) else beta_key.decode('utf8')
+    if isinstance(beta_value, (str,)) or key[-4:] == '-bin':
+        value = beta_value
+    else:
+        value = beta_value.decode('utf8')
+    return _Metadatum(key, value)
+
+
+def beta(metadata):
+    if metadata is None:
+        return ()
+    else:
+        return tuple(_beta_metadatum(key, value) for key, value in metadata)
+
+
+def unbeta(beta_metadata):
+    if beta_metadata is None:
+        return ()
+    else:
+        return tuple(
+            _metadatum(beta_key, beta_value)
+            for beta_key, beta_value in beta_metadata)
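The conversion module above bridges the beta API's bytes-only metadata and the GA API's text-oriented metadata. A brief illustration of both directions (it imports the private grpc.beta._metadata module added here, purely to demonstrate the semantics):

    from grpc.beta import _metadata

    # GA-style text pairs become beta-style bytes pairs.
    assert _metadata.beta((('initial-metadata-key', 'a value'),)) == (
        (b'initial-metadata-key', b'a value'),)

    # Beta-style bytes pairs come back as text, except '-bin' values stay bytes.
    assert _metadata.unbeta(((b'binary-metadata-bin', b'\x00\x01'),)) == (
        ('binary-metadata-bin', b'\x00\x01'),)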
diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py
index ec363e9..1c22dbe 100644
--- a/src/python/grpcio/grpc/beta/_server_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_server_adaptations.py
@@ -18,6 +18,7 @@
 
 import grpc
 from grpc import _common
+from grpc.beta import _metadata
 from grpc.beta import interfaces
 from grpc.framework.common import cardinality
 from grpc.framework.common import style
@@ -65,14 +66,15 @@
         return _ServerProtocolContext(self._servicer_context)
 
     def invocation_metadata(self):
-        return _common.to_cygrpc_metadata(
-            self._servicer_context.invocation_metadata())
+        return _metadata.beta(self._servicer_context.invocation_metadata())
 
     def initial_metadata(self, initial_metadata):
-        self._servicer_context.send_initial_metadata(initial_metadata)
+        self._servicer_context.send_initial_metadata(
+            _metadata.unbeta(initial_metadata))
 
     def terminal_metadata(self, terminal_metadata):
-        self._servicer_context.set_terminal_metadata(terminal_metadata)
+        self._servicer_context.set_terminal_metadata(
+            _metadata.unbeta(terminal_metadata))
 
     def code(self, code):
         self._servicer_context.set_code(code)
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index e52ce76..312daf0 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -21,6 +21,7 @@
 import grpc
 from grpc import _auth
 from grpc.beta import _client_adaptations
+from grpc.beta import _metadata
 from grpc.beta import _server_adaptations
 from grpc.beta import interfaces  # pylint: disable=unused-import
 from grpc.framework.common import cardinality  # pylint: disable=unused-import
@@ -31,7 +32,18 @@
 ChannelCredentials = grpc.ChannelCredentials
 ssl_channel_credentials = grpc.ssl_channel_credentials
 CallCredentials = grpc.CallCredentials
-metadata_call_credentials = grpc.metadata_call_credentials
+
+
+def metadata_call_credentials(metadata_plugin, name=None):
+
+    def plugin(context, callback):
+
+        def wrapped_callback(beta_metadata, error):
+            callback(_metadata.unbeta(beta_metadata), error)
+
+        metadata_plugin(context, wrapped_callback)
+
+    return grpc.metadata_call_credentials(plugin, name=name)
 
 
 def google_call_credentials(credentials):
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py
index 496689d..90eeb13 100644
--- a/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py
+++ b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py
@@ -67,6 +67,9 @@
         self._rpc.set_trailing_metadata(
             _common.fuss_with_metadata(trailing_metadata))
 
+    def abort(self, code, details):
+        raise NotImplementedError()
+
     def set_code(self, code):
         self._rpc.set_code(code)
 
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index 34cbade..3bf5308 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -39,6 +39,7 @@
   "unit._cython.cygrpc_test.TypeSmokeTest",
   "unit._empty_message_test.EmptyMessageTest",
   "unit._exit_test.ExitTest",
+  "unit._interceptor_test.InterceptorTest",
   "unit._invalid_metadata_test.InvalidMetadataTest",
   "unit._invocation_defects_test.InvocationDefectsTest",
   "unit._metadata_code_details_test.MetadataCodeDetailsTest",
diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py
index b14e8d5..d6f4447 100644
--- a/src/python/grpcio_tests/tests/unit/_api_test.py
+++ b/src/python/grpcio_tests/tests/unit/_api_test.py
@@ -33,18 +33,21 @@
             'AuthMetadataPlugin', 'ServerCertificateConfiguration',
             'ServerCredentials', 'UnaryUnaryMultiCallable',
             'UnaryStreamMultiCallable', 'StreamUnaryMultiCallable',
-            'StreamStreamMultiCallable', 'Channel', 'ServicerContext',
+            'StreamStreamMultiCallable', 'UnaryUnaryClientInterceptor',
+            'UnaryStreamClientInterceptor', 'StreamUnaryClientInterceptor',
+            'StreamStreamClientInterceptor', 'Channel', 'ServicerContext',
             'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler',
-            'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler',
-            'unary_stream_rpc_method_handler',
-            'stream_unary_rpc_method_handler',
+            'ServiceRpcHandler', 'Server', 'ServerInterceptor',
+            'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler',
+            'stream_unary_rpc_method_handler', 'ClientCallDetails',
             'stream_stream_rpc_method_handler',
             'method_handlers_generic_handler', 'ssl_channel_credentials',
             'metadata_call_credentials', 'access_token_call_credentials',
             'composite_call_credentials', 'composite_channel_credentials',
             'ssl_server_credentials', 'ssl_server_certificate_configuration',
             'dynamic_ssl_server_credentials', 'channel_ready_future',
-            'insecure_channel', 'secure_channel', 'server',)
+            'insecure_channel', 'secure_channel', 'intercept_channel',
+            'server',)
 
         six.assertCountEqual(self, expected_grpc_code_elements,
                              _from_grpc_import_star.GRPC_ELEMENTS)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
index 5b97b7b..a8a7175 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
@@ -22,7 +22,7 @@
 
 _INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
 _EMPTY_FLAGS = 0
-_EMPTY_METADATA = cygrpc.Metadata(())
+_EMPTY_METADATA = ()
 
 _SERVER_SHUTDOWN_TAG = 'server_shutdown'
 _REQUEST_CALL_TAG = 'request_call'
@@ -65,12 +65,10 @@
 
         with self._lock:
             self._call.start_server_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),),
                 _RECEIVE_CLOSE_ON_SERVER_TAG)
             self._call.start_server_batch(
-                cygrpc.Operations(
-                    (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+                (cygrpc.operation_receive_message(_EMPTY_FLAGS),),
                 _RECEIVE_MESSAGE_TAG)
         first_event = self._completion_queue.poll()
         if _is_cancellation_event(first_event):
@@ -84,8 +82,8 @@
                     cygrpc.operation_send_status_from_server(
                         _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
                         _EMPTY_FLAGS),)
-                self._call.start_server_batch(
-                    cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
+                self._call.start_server_batch(operations,
+                                              _SERVER_COMPLETE_CALL_TAG)
             self._completion_queue.poll()
             self._completion_queue.poll()
 
@@ -179,8 +177,7 @@
                     cygrpc.operation_receive_message(_EMPTY_FLAGS),
                     cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
                 tag = 'client_complete_call_{0:04d}_tag'.format(index)
-                client_call.start_client_batch(
-                    cygrpc.Operations(operations), tag)
+                client_call.start_client_batch(operations, tag)
                 client_due.add(tag)
                 client_calls.append(client_call)
 
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_common.py b/src/python/grpcio_tests/tests/unit/_cython/_common.py
index ac66d1d..96f0f15 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_common.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_common.py
@@ -23,17 +23,14 @@
 INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
 EMPTY_FLAGS = 0
 
-INVOCATION_METADATA = cygrpc.Metadata(
-    (cygrpc.Metadatum(b'client-md-key', b'client-md-key'),
-     cygrpc.Metadatum(b'client-md-key-bin', b'\x00\x01' * 3000),))
+INVOCATION_METADATA = (('client-md-key', 'client-md-key'),
+                       ('client-md-key-bin', b'\x00\x01' * 3000),)
 
-INITIAL_METADATA = cygrpc.Metadata(
-    (cygrpc.Metadatum(b'server-initial-md-key', b'server-initial-md-value'),
-     cygrpc.Metadatum(b'server-initial-md-key-bin', b'\x00\x02' * 3000),))
+INITIAL_METADATA = (('server-initial-md-key', 'server-initial-md-value'),
+                    ('server-initial-md-key-bin', b'\x00\x02' * 3000),)
 
-TRAILING_METADATA = cygrpc.Metadata(
-    (cygrpc.Metadatum(b'server-trailing-md-key', b'server-trailing-md-value'),
-     cygrpc.Metadatum(b'server-trailing-md-key-bin', b'\x00\x03' * 3000),))
+TRAILING_METADATA = (('server-trailing-md-key', 'server-trailing-md-value'),
+                     ('server-trailing-md-key-bin', b'\x00\x03' * 3000),)
 
 
 class QueueDriver(object):
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
index 14cc666..d08003a 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
@@ -48,20 +48,19 @@
         client_complete_rpc_tag = 'client_complete_rpc_tag'
         with self.client_condition:
             client_receive_initial_metadata_start_batch_result = (
-                client_call.start_client_batch(
-                    cygrpc.Operations([
-                        cygrpc.operation_receive_initial_metadata(
-                            _common.EMPTY_FLAGS),
-                    ]), client_receive_initial_metadata_tag))
+                client_call.start_client_batch([
+                    cygrpc.operation_receive_initial_metadata(
+                        _common.EMPTY_FLAGS),
+                ], client_receive_initial_metadata_tag))
             client_complete_rpc_start_batch_result = client_call.start_client_batch(
-                cygrpc.Operations([
+                [
                     cygrpc.operation_send_initial_metadata(
                         _common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
                     cygrpc.operation_send_close_from_client(
                         _common.EMPTY_FLAGS),
                     cygrpc.operation_receive_status_on_client(
                         _common.EMPTY_FLAGS),
-                ]), client_complete_rpc_tag)
+                ], client_complete_rpc_tag)
             self.client_driver.add_due({
                 client_receive_initial_metadata_tag,
                 client_complete_rpc_tag,
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
index 1e44bcc..d0166a2 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
@@ -43,20 +43,19 @@
         client_complete_rpc_tag = 'client_complete_rpc_tag'
         with self.client_condition:
             client_receive_initial_metadata_start_batch_result = (
-                client_call.start_client_batch(
-                    cygrpc.Operations([
-                        cygrpc.operation_receive_initial_metadata(
-                            _common.EMPTY_FLAGS),
-                    ]), client_receive_initial_metadata_tag))
+                client_call.start_client_batch([
+                    cygrpc.operation_receive_initial_metadata(
+                        _common.EMPTY_FLAGS),
+                ], client_receive_initial_metadata_tag))
             client_complete_rpc_start_batch_result = client_call.start_client_batch(
-                cygrpc.Operations([
+                [
                     cygrpc.operation_send_initial_metadata(
                         _common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
                     cygrpc.operation_send_close_from_client(
                         _common.EMPTY_FLAGS),
                     cygrpc.operation_receive_status_on_client(
                         _common.EMPTY_FLAGS),
-                ]), client_complete_rpc_tag)
+                ], client_complete_rpc_tag)
             self.client_driver.add_due({
                 client_receive_initial_metadata_tag,
                 client_complete_rpc_tag,
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
index 0105612..1deb15b 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
@@ -20,7 +20,7 @@
 
 _INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
 _EMPTY_FLAGS = 0
-_EMPTY_METADATA = cygrpc.Metadata(())
+_EMPTY_METADATA = ()
 
 
 class _ServerDriver(object):
@@ -157,19 +157,17 @@
         client_complete_rpc_tag = 'client_complete_rpc_tag'
         with client_condition:
             client_receive_initial_metadata_start_batch_result = (
-                client_call.start_client_batch(
-                    cygrpc.Operations([
-                        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
-                    ]), client_receive_initial_metadata_tag))
+                client_call.start_client_batch([
+                    cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+                ], client_receive_initial_metadata_tag))
             client_due.add(client_receive_initial_metadata_tag)
             client_complete_rpc_start_batch_result = (
-                client_call.start_client_batch(
-                    cygrpc.Operations([
-                        cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
-                                                               _EMPTY_FLAGS),
-                        cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
-                        cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
-                    ]), client_complete_rpc_tag))
+                client_call.start_client_batch([
+                    cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
+                                                           _EMPTY_FLAGS),
+                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+                    cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+                ], client_complete_rpc_tag))
             client_due.add(client_complete_rpc_tag)
 
         server_rpc_event = server_driver.first_event()
@@ -197,8 +195,8 @@
                 server_rpc_event.operation_call.start_server_batch([
                     cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
                     cygrpc.operation_send_status_from_server(
-                        cygrpc.Metadata(()), cygrpc.StatusCode.ok,
-                        b'test details', _EMPTY_FLAGS),
+                        (), cygrpc.StatusCode.ok, b'test details',
+                        _EMPTY_FLAGS),
                 ], server_complete_rpc_tag))
         server_send_second_message_event = server_call_driver.event_with_tag(
             server_send_second_message_tag)
@@ -209,10 +207,9 @@
         with client_condition:
             client_receive_first_message_tag = 'client_receive_first_message_tag'
             client_receive_first_message_start_batch_result = (
-                client_call.start_client_batch(
-                    cygrpc.Operations([
-                        cygrpc.operation_receive_message(_EMPTY_FLAGS),
-                    ]), client_receive_first_message_tag))
+                client_call.start_client_batch([
+                    cygrpc.operation_receive_message(_EMPTY_FLAGS),
+                ], client_receive_first_message_tag))
             client_due.add(client_receive_first_message_tag)
         client_receive_first_message_event = client_driver.event_with_tag(
             client_receive_first_message_tag)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
index da94cf8..4eda685 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
@@ -29,50 +29,12 @@
 
 
 def _metadata_plugin(context, callback):
-    callback(
-        cygrpc.Metadata([
-            cygrpc.Metadatum(_CALL_CREDENTIALS_METADATA_KEY,
-                             _CALL_CREDENTIALS_METADATA_VALUE)
-        ]), cygrpc.StatusCode.ok, b'')
+    callback(((_CALL_CREDENTIALS_METADATA_KEY,
+               _CALL_CREDENTIALS_METADATA_VALUE,),), cygrpc.StatusCode.ok, b'')
 
 
 class TypeSmokeTest(unittest.TestCase):
 
-    def testStringsInUtilitiesUpDown(self):
-        self.assertEqual(0, cygrpc.StatusCode.ok)
-        metadatum = cygrpc.Metadatum(b'a', b'b')
-        self.assertEqual(b'a', metadatum.key)
-        self.assertEqual(b'b', metadatum.value)
-        metadata = cygrpc.Metadata([metadatum])
-        self.assertEqual(1, len(metadata))
-        self.assertEqual(metadatum.key, metadata[0].key)
-
-    def testMetadataIteration(self):
-        metadata = cygrpc.Metadata(
-            [cygrpc.Metadatum(b'a', b'b'), cygrpc.Metadatum(b'c', b'd')])
-        iterator = iter(metadata)
-        metadatum = next(iterator)
-        self.assertIsInstance(metadatum, cygrpc.Metadatum)
-        self.assertEqual(metadatum.key, b'a')
-        self.assertEqual(metadatum.value, b'b')
-        metadatum = next(iterator)
-        self.assertIsInstance(metadatum, cygrpc.Metadatum)
-        self.assertEqual(metadatum.key, b'c')
-        self.assertEqual(metadatum.value, b'd')
-        with self.assertRaises(StopIteration):
-            next(iterator)
-
-    def testOperationsIteration(self):
-        operations = cygrpc.Operations(
-            [cygrpc.operation_send_message(b'asdf', _EMPTY_FLAGS)])
-        iterator = iter(operations)
-        operation = next(iterator)
-        self.assertIsInstance(operation, cygrpc.Operation)
-        # `Operation`s are write-only structures; can't directly debug anything out
-        # of them. Just check that we stop iterating.
-        with self.assertRaises(StopIteration):
-            next(iterator)
-
     def testOperationFlags(self):
         operation = cygrpc.operation_send_message(b'asdf',
                                                   cygrpc.WriteFlag.no_compress)
@@ -182,8 +144,7 @@
         def performer():
             tag = object()
             try:
-                call_result = call.start_client_batch(
-                    cygrpc.Operations(operations), tag)
+                call_result = call.start_client_batch(operations, tag)
                 self.assertEqual(cygrpc.CallError.ok, call_result)
                 event = queue.poll(deadline)
                 self.assertEqual(cygrpc.CompletionType.operation_complete,
@@ -200,14 +161,14 @@
     def test_echo(self):
         DEADLINE = time.time() + 5
         DEADLINE_TOLERANCE = 0.25
-        CLIENT_METADATA_ASCII_KEY = b'key'
-        CLIENT_METADATA_ASCII_VALUE = b'val'
-        CLIENT_METADATA_BIN_KEY = b'key-bin'
+        CLIENT_METADATA_ASCII_KEY = 'key'
+        CLIENT_METADATA_ASCII_VALUE = 'val'
+        CLIENT_METADATA_BIN_KEY = 'key-bin'
         CLIENT_METADATA_BIN_VALUE = b'\0' * 1000
-        SERVER_INITIAL_METADATA_KEY = b'init_me_me_me'
-        SERVER_INITIAL_METADATA_VALUE = b'whodawha?'
-        SERVER_TRAILING_METADATA_KEY = b'california_is_in_a_drought'
-        SERVER_TRAILING_METADATA_VALUE = b'zomg it is'
+        SERVER_INITIAL_METADATA_KEY = 'init_me_me_me'
+        SERVER_INITIAL_METADATA_VALUE = 'whodawha?'
+        SERVER_TRAILING_METADATA_KEY = 'california_is_in_a_drought'
+        SERVER_TRAILING_METADATA_VALUE = 'zomg it is'
         SERVER_STATUS_CODE = cygrpc.StatusCode.ok
         SERVER_STATUS_DETAILS = b'our work is never over'
         REQUEST = b'in death a member of project mayhem has a name'
@@ -227,11 +188,9 @@
         client_call = self.client_channel.create_call(
             None, 0, self.client_completion_queue, METHOD, self.host_argument,
             cygrpc_deadline)
-        client_initial_metadata = cygrpc.Metadata([
-            cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
-                             CLIENT_METADATA_ASCII_VALUE),
-            cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)
-        ])
+        client_initial_metadata = (
+            (CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE,),
+            (CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE,),)
         client_start_batch_result = client_call.start_client_batch([
             cygrpc.operation_send_initial_metadata(client_initial_metadata,
                                                    _EMPTY_FLAGS),
@@ -263,14 +222,10 @@
 
         server_call_tag = object()
         server_call = request_event.operation_call
-        server_initial_metadata = cygrpc.Metadata([
-            cygrpc.Metadatum(SERVER_INITIAL_METADATA_KEY,
-                             SERVER_INITIAL_METADATA_VALUE)
-        ])
-        server_trailing_metadata = cygrpc.Metadata([
-            cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY,
-                             SERVER_TRAILING_METADATA_VALUE)
-        ])
+        server_initial_metadata = (
+            (SERVER_INITIAL_METADATA_KEY, SERVER_INITIAL_METADATA_VALUE,),)
+        server_trailing_metadata = (
+            (SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE,),)
         server_start_batch_result = server_call.start_server_batch([
             cygrpc.operation_send_initial_metadata(
                 server_initial_metadata,
@@ -347,7 +302,7 @@
         METHOD = b'twinkies'
 
         cygrpc_deadline = cygrpc.Timespec(DEADLINE)
-        empty_metadata = cygrpc.Metadata([])
+        empty_metadata = ()
 
         server_request_tag = object()
         self.server.request_call(self.server_completion_queue,
diff --git a/src/python/grpcio_tests/tests/unit/_interceptor_test.py b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
new file mode 100644
index 0000000..cf875ed
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
@@ -0,0 +1,571 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test of gRPC Python interceptors."""
+
+import collections
+import itertools
+import threading
+import unittest
+from concurrent import futures
+
+import grpc
+from grpc.framework.foundation import logging_pool
+
+from tests.unit.framework.common import test_constants
+from tests.unit.framework.common import test_control
+
+_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
+_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
+_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
+_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
+
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
+
+
+class _Callback(object):
+
+    def __init__(self):
+        self._condition = threading.Condition()
+        self._value = None
+        self._called = False
+
+    def __call__(self, value):
+        with self._condition:
+            self._value = value
+            self._called = True
+            self._condition.notify_all()
+
+    def value(self):
+        with self._condition:
+            while not self._called:
+                self._condition.wait()
+            return self._value
+
+
+class _Handler(object):
+
+    def __init__(self, control):
+        self._control = control
+
+    def handle_unary_unary(self, request, servicer_context):
+        self._control.control()
+        if servicer_context is not None:
+            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+        return request
+
+    def handle_unary_stream(self, request, servicer_context):
+        for _ in range(test_constants.STREAM_LENGTH):
+            self._control.control()
+            yield request
+        self._control.control()
+        if servicer_context is not None:
+            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+
+    def handle_stream_unary(self, request_iterator, servicer_context):
+        if servicer_context is not None:
+            servicer_context.invocation_metadata()
+        self._control.control()
+        response_elements = []
+        for request in request_iterator:
+            self._control.control()
+            response_elements.append(request)
+        self._control.control()
+        if servicer_context is not None:
+            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+        return b''.join(response_elements)
+
+    def handle_stream_stream(self, request_iterator, servicer_context):
+        self._control.control()
+        if servicer_context is not None:
+            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+        for request in request_iterator:
+            self._control.control()
+            yield request
+        self._control.control()
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+    def __init__(self, request_streaming, response_streaming,
+                 request_deserializer, response_serializer, unary_unary,
+                 unary_stream, stream_unary, stream_stream):
+        self.request_streaming = request_streaming
+        self.response_streaming = response_streaming
+        self.request_deserializer = request_deserializer
+        self.response_serializer = response_serializer
+        self.unary_unary = unary_unary
+        self.unary_stream = unary_stream
+        self.stream_unary = stream_unary
+        self.stream_stream = stream_stream
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+    def __init__(self, handler):
+        self._handler = handler
+
+    def service(self, handler_call_details):
+        if handler_call_details.method == _UNARY_UNARY:
+            return _MethodHandler(False, False, None, None,
+                                  self._handler.handle_unary_unary, None, None,
+                                  None)
+        elif handler_call_details.method == _UNARY_STREAM:
+            return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
+                                  _SERIALIZE_RESPONSE, None,
+                                  self._handler.handle_unary_stream, None, None)
+        elif handler_call_details.method == _STREAM_UNARY:
+            return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
+                                  _SERIALIZE_RESPONSE, None, None,
+                                  self._handler.handle_stream_unary, None)
+        elif handler_call_details.method == _STREAM_STREAM:
+            return _MethodHandler(True, True, None, None, None, None, None,
+                                  self._handler.handle_stream_stream)
+        else:
+            return None
+
+
+def _unary_unary_multi_callable(channel):
+    return channel.unary_unary(_UNARY_UNARY)
+
+
+def _unary_stream_multi_callable(channel):
+    return channel.unary_stream(
+        _UNARY_STREAM,
+        request_serializer=_SERIALIZE_REQUEST,
+        response_deserializer=_DESERIALIZE_RESPONSE)
+
+
+def _stream_unary_multi_callable(channel):
+    return channel.stream_unary(
+        _STREAM_UNARY,
+        request_serializer=_SERIALIZE_REQUEST,
+        response_deserializer=_DESERIALIZE_RESPONSE)
+
+
+def _stream_stream_multi_callable(channel):
+    return channel.stream_stream(_STREAM_STREAM)
+
+
+class _ClientCallDetails(
+        collections.namedtuple('_ClientCallDetails',
+                               ('method', 'timeout', 'metadata',
+                                'credentials')), grpc.ClientCallDetails):
+    pass
+
+
+class _GenericClientInterceptor(
+        grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor,
+        grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor):
+
+    def __init__(self, interceptor_function):
+        self._fn = interceptor_function
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, iter((request,)), False, False)
+        response = continuation(new_details, next(new_request_iterator))
+        return postprocess(response) if postprocess else response
+
+    def intercept_unary_stream(self, continuation, client_call_details,
+                               request):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, iter((request,)), False, True)
+        response_it = continuation(new_details, new_request_iterator)
+        return postprocess(response_it) if postprocess else response_it
+
+    def intercept_stream_unary(self, continuation, client_call_details,
+                               request_iterator):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, request_iterator, True, False)
+        response = continuation(new_details, next(new_request_iterator))
+        return postprocess(response) if postprocess else response
+
+    def intercept_stream_stream(self, continuation, client_call_details,
+                                request_iterator):
+        new_details, new_request_iterator, postprocess = self._fn(
+            client_call_details, request_iterator, True, True)
+        response_it = continuation(new_details, new_request_iterator)
+        return postprocess(response_it) if postprocess else response_it
+
+
+class _LoggingInterceptor(
+        grpc.ServerInterceptor, grpc.UnaryUnaryClientInterceptor,
+        grpc.UnaryStreamClientInterceptor, grpc.StreamUnaryClientInterceptor,
+        grpc.StreamStreamClientInterceptor):
+
+    def __init__(self, tag, record):
+        self.tag = tag
+        self.record = record
+
+    def intercept_service(self, continuation, handler_call_details):
+        self.record.append(self.tag + ':intercept_service')
+        return continuation(handler_call_details)
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        self.record.append(self.tag + ':intercept_unary_unary')
+        return continuation(client_call_details, request)
+
+    def intercept_unary_stream(self, continuation, client_call_details,
+                               request):
+        self.record.append(self.tag + ':intercept_unary_stream')
+        return continuation(client_call_details, request)
+
+    def intercept_stream_unary(self, continuation, client_call_details,
+                               request_iterator):
+        self.record.append(self.tag + ':intercept_stream_unary')
+        return continuation(client_call_details, request_iterator)
+
+    def intercept_stream_stream(self, continuation, client_call_details,
+                                request_iterator):
+        self.record.append(self.tag + ':intercept_stream_stream')
+        return continuation(client_call_details, request_iterator)
+
+
+class _DefectiveClientInterceptor(grpc.UnaryUnaryClientInterceptor):
+
+    def intercept_unary_unary(self, ignored_continuation,
+                              ignored_client_call_details, ignored_request):
+        raise test_control.Defect()
+
+
+def _wrap_request_iterator_stream_interceptor(wrapper):
+
+    def intercept_call(client_call_details, request_iterator, request_streaming,
+                       ignored_response_streaming):
+        if request_streaming:
+            return client_call_details, wrapper(request_iterator), None
+        else:
+            return client_call_details, request_iterator, None
+
+    return _GenericClientInterceptor(intercept_call)
+
+
+def _append_request_header_interceptor(header, value):
+
+    def intercept_call(client_call_details, request_iterator,
+                       ignored_request_streaming, ignored_response_streaming):
+        metadata = []
+        if client_call_details.metadata:
+            metadata = list(client_call_details.metadata)
+        metadata.append((header, value,))
+        client_call_details = _ClientCallDetails(
+            client_call_details.method, client_call_details.timeout, metadata,
+            client_call_details.credentials)
+        return client_call_details, request_iterator, None
+
+    return _GenericClientInterceptor(intercept_call)
+
+
+class _GenericServerInterceptor(grpc.ServerInterceptor):
+
+    def __init__(self, fn):
+        self._fn = fn
+
+    def intercept_service(self, continuation, handler_call_details):
+        return self._fn(continuation, handler_call_details)
+
+
+def _filter_server_interceptor(condition, interceptor):
+
+    def intercept_service(continuation, handler_call_details):
+        if condition(handler_call_details):
+            return interceptor.intercept_service(continuation,
+                                                 handler_call_details)
+        return continuation(handler_call_details)
+
+    return _GenericServerInterceptor(intercept_service)
+
+
+class InterceptorTest(unittest.TestCase):
+
+    def setUp(self):
+        self._control = test_control.PauseFailControl()
+        self._handler = _Handler(self._control)
+        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+
+        self._record = []
+        conditional_interceptor = _filter_server_interceptor(
+            lambda x: ('secret', '42') in x.invocation_metadata,
+            _LoggingInterceptor('s3', self._record))
+
+        self._server = grpc.server(
+            self._server_pool,
+            interceptors=(_LoggingInterceptor('s1', self._record),
+                          conditional_interceptor,
+                          _LoggingInterceptor('s2', self._record),))
+        port = self._server.add_insecure_port('[::]:0')
+        self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
+        self._server.start()
+
+        self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+    def tearDown(self):
+        self._server.stop(None)
+        self._server_pool.shutdown(wait=True)
+
+    def testTripleRequestMessagesClientInterceptor(self):
+
+        def triple(request_iterator):
+            while True:
+                try:
+                    item = next(request_iterator)
+                    yield item
+                    yield item
+                    yield item
+                except StopIteration:
+                    break
+
+        interceptor = _wrap_request_iterator_stream_interceptor(triple)
+        channel = grpc.intercept_channel(self._channel, interceptor)
+        requests = tuple(b'\x07\x08'
+                         for _ in range(test_constants.STREAM_LENGTH))
+
+        multi_callable = _stream_stream_multi_callable(channel)
+        response_iterator = multi_callable(
+            iter(requests),
+            metadata=(
+                ('test',
+                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
+
+        responses = tuple(response_iterator)
+        self.assertEqual(len(responses), 3 * test_constants.STREAM_LENGTH)
+
+        multi_callable = _stream_stream_multi_callable(self._channel)
+        response_iterator = multi_callable(
+            iter(requests),
+            metadata=(
+                ('test',
+                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
+
+        responses = tuple(response_iterator)
+        self.assertEqual(len(responses), test_constants.STREAM_LENGTH)
+
+    def testDefectiveClientInterceptor(self):
+        interceptor = _DefectiveClientInterceptor()
+        defective_channel = grpc.intercept_channel(self._channel, interceptor)
+
+        request = b'\x07\x08'
+
+        multi_callable = _unary_unary_multi_callable(defective_channel)
+        call_future = multi_callable.future(
+            request,
+            metadata=(
+                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
+
+        self.assertIsNotNone(call_future.exception())
+        self.assertEqual(call_future.code(), grpc.StatusCode.INTERNAL)
+
+    def testInterceptedHeaderManipulationWithServerSideVerification(self):
+        request = b'\x07\x08'
+
+        channel = grpc.intercept_channel(
+            self._channel, _append_request_header_interceptor('secret', '42'))
+        channel = grpc.intercept_channel(
+            channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        self._record[:] = []
+
+        multi_callable = _unary_unary_multi_callable(channel)
+        multi_callable.with_call(
+            request,
+            metadata=(
+                ('test',
+                 'InterceptedUnaryRequestBlockingUnaryResponseWithCall'),))
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
+            's1:intercept_service', 's3:intercept_service',
+            's2:intercept_service'
+        ])
+
+    def testInterceptedUnaryRequestBlockingUnaryResponse(self):
+        request = b'\x07\x08'
+
+        self._record[:] = []
+
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _unary_unary_multi_callable(channel)
+        multi_callable(
+            request,
+            metadata=(
+                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedUnaryRequestBlockingUnaryResponseWithCall(self):
+        request = b'\x07\x08'
+
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        self._record[:] = []
+
+        multi_callable = _unary_unary_multi_callable(channel)
+        multi_callable.with_call(
+            request,
+            metadata=(
+                ('test',
+                 'InterceptedUnaryRequestBlockingUnaryResponseWithCall'),))
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedUnaryRequestFutureUnaryResponse(self):
+        request = b'\x07\x08'
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _unary_unary_multi_callable(channel)
+        response_future = multi_callable.future(
+            request,
+            metadata=(('test', 'InterceptedUnaryRequestFutureUnaryResponse'),))
+        response_future.result()
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedUnaryRequestStreamResponse(self):
+        request = b'\x37\x58'
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _unary_stream_multi_callable(channel)
+        response_iterator = multi_callable(
+            request,
+            metadata=(('test', 'InterceptedUnaryRequestStreamResponse'),))
+        tuple(response_iterator)
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_unary_stream', 'c2:intercept_unary_stream',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedStreamRequestBlockingUnaryResponse(self):
+        requests = tuple(b'\x07\x08'
+                         for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = iter(requests)
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _stream_unary_multi_callable(channel)
+        multi_callable(
+            request_iterator,
+            metadata=(
+                ('test', 'InterceptedStreamRequestBlockingUnaryResponse'),))
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedStreamRequestBlockingUnaryResponseWithCall(self):
+        requests = tuple(b'\x07\x08'
+                         for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = iter(requests)
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _stream_unary_multi_callable(channel)
+        multi_callable.with_call(
+            request_iterator,
+            metadata=(
+                ('test',
+                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedStreamRequestFutureUnaryResponse(self):
+        requests = tuple(b'\x07\x08'
+                         for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = iter(requests)
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _stream_unary_multi_callable(channel)
+        response_future = multi_callable.future(
+            request_iterator,
+            metadata=(('test', 'InterceptedStreamRequestFutureUnaryResponse'),))
+        response_future.result()
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+    def testInterceptedStreamRequestStreamResponse(self):
+        requests = tuple(b'\x77\x58'
+                         for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = iter(requests)
+
+        self._record[:] = []
+        channel = grpc.intercept_channel(
+            self._channel,
+            _LoggingInterceptor('c1', self._record),
+            _LoggingInterceptor('c2', self._record))
+
+        multi_callable = _stream_stream_multi_callable(channel)
+        response_iterator = multi_callable(
+            request_iterator,
+            metadata=(('test', 'InterceptedStreamRequestStreamResponse'),))
+        tuple(response_iterator)
+
+        self.assertSequenceEqual(self._record, [
+            'c1:intercept_stream_stream', 'c2:intercept_stream_stream',
+            's1:intercept_service', 's2:intercept_service'
+        ])
+
+
+if __name__ == '__main__':
+    unittest.main(verbosity=2)
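
The test file above exercises the client-side interceptor API introduced in this change. For orientation, here is a hedged usage sketch of grpc.intercept_channel with a single unary-unary interceptor; the target address and method path are made up for illustration, and the final call is left commented out because it needs a live server.

    import grpc


    class _PrintingInterceptor(grpc.UnaryUnaryClientInterceptor):

        def intercept_unary_unary(self, continuation, client_call_details,
                                  request):
            # Observe the outgoing call, then delegate to the next handler.
            print('calling', client_call_details.method)
            return continuation(client_call_details, request)


    channel = grpc.insecure_channel('localhost:50051')
    intercepted_channel = grpc.intercept_channel(channel,
                                                 _PrintingInterceptor())
    # intercepted_channel.unary_unary('/test/UnaryUnary')(b'payload') would now
    # print the method name before issuing the RPC.
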
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index 6faab94..cb59cd3 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -56,6 +56,7 @@
 
     def __init__(self):
         self._lock = threading.Lock()
+        self._abort_call = False
         self._code = None
         self._details = None
         self._exception = False
@@ -67,10 +68,13 @@
             self._received_client_metadata = context.invocation_metadata()
             context.send_initial_metadata(_SERVER_INITIAL_METADATA)
             context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
-            if self._code is not None:
-                context.set_code(self._code)
-            if self._details is not None:
-                context.set_details(self._details)
+            if self._abort_call:
+                context.abort(self._code, self._details)
+            else:
+                if self._code is not None:
+                    context.set_code(self._code)
+                if self._details is not None:
+                    context.set_details(self._details)
             if self._exception:
                 raise test_control.Defect()
             else:
@@ -81,10 +85,13 @@
             self._received_client_metadata = context.invocation_metadata()
             context.send_initial_metadata(_SERVER_INITIAL_METADATA)
             context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
-            if self._code is not None:
-                context.set_code(self._code)
-            if self._details is not None:
-                context.set_details(self._details)
+            if self._abort_call:
+                context.abort(self._code, self._details)
+            else:
+                if self._code is not None:
+                    context.set_code(self._code)
+                if self._details is not None:
+                    context.set_details(self._details)
             for _ in range(test_constants.STREAM_LENGTH // 2):
                 yield _SERIALIZED_RESPONSE
             if self._exception:
@@ -95,14 +102,16 @@
             self._received_client_metadata = context.invocation_metadata()
             context.send_initial_metadata(_SERVER_INITIAL_METADATA)
             context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
-            if self._code is not None:
-                context.set_code(self._code)
-            if self._details is not None:
-                context.set_details(self._details)
             # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
             # request iterator.
-            for ignored_request in request_iterator:
-                pass
+            list(request_iterator)
+            if self._abort_call:
+                context.abort(self._code, self._details)
+            else:
+                if self._code is not None:
+                    context.set_code(self._code)
+                if self._details is not None:
+                    context.set_details(self._details)
             if self._exception:
                 raise test_control.Defect()
             else:
@@ -113,19 +122,25 @@
             self._received_client_metadata = context.invocation_metadata()
             context.send_initial_metadata(_SERVER_INITIAL_METADATA)
             context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
-            if self._code is not None:
-                context.set_code(self._code)
-            if self._details is not None:
-                context.set_details(self._details)
             # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
             # request iterator.
-            for ignored_request in request_iterator:
-                pass
+            list(request_iterator)
+            if self._abort_call:
+                context.abort(self._code, self._details)
+            else:
+                if self._code is not None:
+                    context.set_code(self._code)
+                if self._details is not None:
+                    context.set_details(self._details)
             for _ in range(test_constants.STREAM_LENGTH // 3):
                 yield object()
             if self._exception:
                 raise test_control.Defect()
 
+    def set_abort_call(self):
+        with self._lock:
+            self._abort_call = True
+
     def set_code(self, code):
         with self._lock:
             self._code = code
@@ -212,11 +227,10 @@
     def testSuccessfulUnaryStream(self):
         self._servicer.set_details(_DETAILS)
 
-        call = self._unary_stream(
+        response_iterator_call = self._unary_stream(
             _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
-        for _ in call:
-            pass
+        received_initial_metadata = response_iterator_call.initial_metadata()
+        list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -225,10 +239,11 @@
             test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                              received_initial_metadata))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
-                                             call.trailing_metadata()))
-        self.assertIs(grpc.StatusCode.OK, call.code())
-        self.assertEqual(_DETAILS, call.details())
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
 
     def testSuccessfulStreamUnary(self):
         self._servicer.set_details(_DETAILS)
@@ -252,12 +267,11 @@
     def testSuccessfulStreamStream(self):
         self._servicer.set_details(_DETAILS)
 
-        call = self._stream_stream(
+        response_iterator_call = self._stream_stream(
             iter([object()] * test_constants.STREAM_LENGTH),
             metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
-        for _ in call:
-            pass
+        received_initial_metadata = response_iterator_call.initial_metadata()
+        list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -266,10 +280,106 @@
             test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                              received_initial_metadata))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
-                                             call.trailing_metadata()))
-        self.assertIs(grpc.StatusCode.OK, call.code())
-        self.assertEqual(_DETAILS, call.details())
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
+
+    def testAbortedUnaryUnary(self):
+        self._servicer.set_code(_NON_OK_CODE)
+        self._servicer.set_details(_DETAILS)
+        self._servicer.set_abort_call()
+
+        with self.assertRaises(grpc.RpcError) as exception_context:
+            self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _CLIENT_METADATA, self._servicer.received_client_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_INITIAL_METADATA,
+                exception_context.exception.initial_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                exception_context.exception.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+        self.assertEqual(_DETAILS, exception_context.exception.details())
+
+    def testAbortedUnaryStream(self):
+        self._servicer.set_code(_NON_OK_CODE)
+        self._servicer.set_details(_DETAILS)
+        self._servicer.set_abort_call()
+
+        response_iterator_call = self._unary_stream(
+            _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+        received_initial_metadata = response_iterator_call.initial_metadata()
+        with self.assertRaises(grpc.RpcError):
+            self.assertEqual(len(list(response_iterator_call)), 0)
+
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _CLIENT_METADATA, self._servicer.received_client_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+                                             received_initial_metadata))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
+
+    def testAbortedStreamUnary(self):
+        self._servicer.set_code(_NON_OK_CODE)
+        self._servicer.set_details(_DETAILS)
+        self._servicer.set_abort_call()
+
+        with self.assertRaises(grpc.RpcError) as exception_context:
+            self._stream_unary.with_call(
+                iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+                metadata=_CLIENT_METADATA)
+
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _CLIENT_METADATA, self._servicer.received_client_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_INITIAL_METADATA,
+                exception_context.exception.initial_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                exception_context.exception.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+        self.assertEqual(_DETAILS, exception_context.exception.details())
+
+    def testAbortedStreamStream(self):
+        self._servicer.set_code(_NON_OK_CODE)
+        self._servicer.set_details(_DETAILS)
+        self._servicer.set_abort_call()
+
+        response_iterator_call = self._stream_stream(
+            iter([object()] * test_constants.STREAM_LENGTH),
+            metadata=_CLIENT_METADATA)
+        received_initial_metadata = response_iterator_call.initial_metadata()
+        with self.assertRaises(grpc.RpcError):
+            self.assertEqual(len(list(response_iterator_call)), 0)
+
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _CLIENT_METADATA, self._servicer.received_client_metadata()))
+        self.assertTrue(
+            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+                                             received_initial_metadata))
+        self.assertTrue(
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
 
     def testCustomCodeUnaryUnary(self):
         self._servicer.set_code(_NON_OK_CODE)
@@ -296,12 +406,11 @@
         self._servicer.set_code(_NON_OK_CODE)
         self._servicer.set_details(_DETAILS)
 
-        call = self._unary_stream(
+        response_iterator_call = self._unary_stream(
             _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
+        received_initial_metadata = response_iterator_call.initial_metadata()
         with self.assertRaises(grpc.RpcError):
-            for _ in call:
-                pass
+            list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -310,10 +419,11 @@
             test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                              received_initial_metadata))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
-                                             call.trailing_metadata()))
-        self.assertIs(_NON_OK_CODE, call.code())
-        self.assertEqual(_DETAILS, call.details())
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
 
     def testCustomCodeStreamUnary(self):
         self._servicer.set_code(_NON_OK_CODE)
@@ -342,13 +452,12 @@
         self._servicer.set_code(_NON_OK_CODE)
         self._servicer.set_details(_DETAILS)
 
-        call = self._stream_stream(
+        response_iterator_call = self._stream_stream(
             iter([object()] * test_constants.STREAM_LENGTH),
             metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
+        received_initial_metadata = response_iterator_call.initial_metadata()
         with self.assertRaises(grpc.RpcError) as exception_context:
-            for _ in call:
-                pass
+            list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -390,12 +499,11 @@
         self._servicer.set_details(_DETAILS)
         self._servicer.set_exception()
 
-        call = self._unary_stream(
+        response_iterator_call = self._unary_stream(
             _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
+        received_initial_metadata = response_iterator_call.initial_metadata()
         with self.assertRaises(grpc.RpcError):
-            for _ in call:
-                pass
+            list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -404,10 +512,11 @@
             test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                              received_initial_metadata))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
-                                             call.trailing_metadata()))
-        self.assertIs(_NON_OK_CODE, call.code())
-        self.assertEqual(_DETAILS, call.details())
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
 
     def testCustomCodeExceptionStreamUnary(self):
         self._servicer.set_code(_NON_OK_CODE)
@@ -438,13 +547,12 @@
         self._servicer.set_details(_DETAILS)
         self._servicer.set_exception()
 
-        call = self._stream_stream(
+        response_iterator_call = self._stream_stream(
             iter([object()] * test_constants.STREAM_LENGTH),
             metadata=_CLIENT_METADATA)
-        received_initial_metadata = call.initial_metadata()
+        received_initial_metadata = response_iterator_call.initial_metadata()
         with self.assertRaises(grpc.RpcError):
-            for _ in call:
-                pass
+            list(response_iterator_call)
 
         self.assertTrue(
             test_common.metadata_transmitted(
@@ -453,10 +561,11 @@
             test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                              received_initial_metadata))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
-                                             call.trailing_metadata()))
-        self.assertIs(_NON_OK_CODE, call.code())
-        self.assertEqual(_DETAILS, call.details())
+            test_common.metadata_transmitted(
+                _SERVER_TRAILING_METADATA,
+                response_iterator_call.trailing_metadata()))
+        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
+        self.assertEqual(_DETAILS, response_iterator_call.details())
 
     def testCustomCodeReturnNoneUnaryUnary(self):
         self._servicer.set_code(_NON_OK_CODE)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index 557d527..0669486 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -34,16 +34,19 @@
 _STREAM_UNARY = '/test/StreamUnary'
 _STREAM_STREAM = '/test/StreamStream'
 
-_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
-                    ('client-md-key-bin', b'\x00\x01'))
+_INVOCATION_METADATA = ((b'invocation-md-key', u'invocation-md-value',),
+                        (u'invocation-md-key-bin', b'\x00\x01',),)
+_EXPECTED_INVOCATION_METADATA = (('invocation-md-key', 'invocation-md-value',),
+                                 ('invocation-md-key-bin', b'\x00\x01',),)
 
-_SERVER_INITIAL_METADATA = (
-    ('server-initial-md-key', 'server-initial-md-value'),
-    ('server-initial-md-key-bin', b'\x00\x02'))
+_INITIAL_METADATA = ((b'initial-md-key', u'initial-md-value'),
+                     (u'initial-md-key-bin', b'\x00\x02'))
+_EXPECTED_INITIAL_METADATA = (('initial-md-key', 'initial-md-value',),
+                              ('initial-md-key-bin', b'\x00\x02',),)
 
-_SERVER_TRAILING_METADATA = (
-    ('server-trailing-md-key', 'server-trailing-md-value'),
-    ('server-trailing-md-key-bin', b'\x00\x03'))
+_TRAILING_METADATA = (('server-trailing-md-key', 'server-trailing-md-value',),
+                      ('server-trailing-md-key-bin', b'\x00\x03',),)
+_EXPECTED_TRAILING_METADATA = _TRAILING_METADATA
 
 
 def user_agent(metadata):
@@ -56,7 +59,8 @@
 def validate_client_metadata(test, servicer_context):
     test.assertTrue(
         test_common.metadata_transmitted(
-            _CLIENT_METADATA, servicer_context.invocation_metadata()))
+            _EXPECTED_INVOCATION_METADATA,
+            servicer_context.invocation_metadata()))
     test.assertTrue(
         user_agent(servicer_context.invocation_metadata())
         .startswith('primary-agent ' + _channel._USER_AGENT))
@@ -67,23 +71,23 @@
 
 def handle_unary_unary(test, request, servicer_context):
     validate_client_metadata(test, servicer_context)
-    servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
-    servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+    servicer_context.send_initial_metadata(_INITIAL_METADATA)
+    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
     return _RESPONSE
 
 
 def handle_unary_stream(test, request, servicer_context):
     validate_client_metadata(test, servicer_context)
-    servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
-    servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+    servicer_context.send_initial_metadata(_INITIAL_METADATA)
+    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
     for _ in range(test_constants.STREAM_LENGTH):
         yield _RESPONSE
 
 
 def handle_stream_unary(test, request_iterator, servicer_context):
     validate_client_metadata(test, servicer_context)
-    servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
-    servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+    servicer_context.send_initial_metadata(_INITIAL_METADATA)
+    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
     # TODO(issue:#6891) We should be able to remove this loop
     for request in request_iterator:
         pass
@@ -92,8 +96,8 @@
 
 def handle_stream_stream(test, request_iterator, servicer_context):
     validate_client_metadata(test, servicer_context)
-    servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
-    servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+    servicer_context.send_initial_metadata(_INITIAL_METADATA)
+    servicer_context.set_trailing_metadata(_TRAILING_METADATA)
     # TODO(issue:#6891) We should be able to remove this loop,
     # and replace with return; yield
     for request in request_iterator:
@@ -156,50 +160,50 @@
     def testUnaryUnary(self):
         multi_callable = self._channel.unary_unary(_UNARY_UNARY)
         unused_response, call = multi_callable.with_call(
-            _REQUEST, metadata=_CLIENT_METADATA)
+            _REQUEST, metadata=_INVOCATION_METADATA)
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                              call.initial_metadata()))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                              call.trailing_metadata()))
 
     def testUnaryStream(self):
         multi_callable = self._channel.unary_stream(_UNARY_STREAM)
-        call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA)
+        call = multi_callable(_REQUEST, metadata=_INVOCATION_METADATA)
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                              call.initial_metadata()))
         for _ in call:
             pass
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                              call.trailing_metadata()))
 
     def testStreamUnary(self):
         multi_callable = self._channel.stream_unary(_STREAM_UNARY)
         unused_response, call = multi_callable.with_call(
             iter([_REQUEST] * test_constants.STREAM_LENGTH),
-            metadata=_CLIENT_METADATA)
+            metadata=_INVOCATION_METADATA)
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                              call.initial_metadata()))
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                              call.trailing_metadata()))
 
     def testStreamStream(self):
         multi_callable = self._channel.stream_stream(_STREAM_STREAM)
         call = multi_callable(
             iter([_REQUEST] * test_constants.STREAM_LENGTH),
-            metadata=_CLIENT_METADATA)
+            metadata=_INVOCATION_METADATA)
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_INITIAL_METADATA,
                                              call.initial_metadata()))
         for _ in call:
             pass
         self.assertTrue(
-            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
+            test_common.metadata_transmitted(_EXPECTED_TRAILING_METADATA,
                                              call.trailing_metadata()))
 
 
diff --git a/src/ruby/end2end/channel_closing_client.rb b/src/ruby/end2end/channel_closing_client.rb
index 8f6888c..62c7421 100755
--- a/src/ruby/end2end/channel_closing_client.rb
+++ b/src/ruby/end2end/channel_closing_client.rb
@@ -44,7 +44,7 @@
   ch = GRPC::Core::Channel.new("localhost:#{server_port}", {},
                                :this_channel_is_insecure)
 
-  srv = GRPC::RpcServer.new
+  srv = new_rpc_server_for_testing
   thd = Thread.new do
     srv.add_http2_port("0.0.0.0:#{client_control_port}", :this_port_is_insecure)
     srv.handle(ChannelClosingClientController.new(ch))
diff --git a/src/ruby/end2end/end2end_common.rb b/src/ruby/end2end/end2end_common.rb
index 790fc23..ffbaa19 100755
--- a/src/ruby/end2end/end2end_common.rb
+++ b/src/ruby/end2end/end2end_common.rb
@@ -29,6 +29,9 @@
 require 'thread'
 require 'timeout'
 require 'English' # see https://github.com/bbatsov/rubocop/issues/1747
+require_relative '../spec/support/helpers'
+
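+# Provides new_rpc_server_for_testing and new_core_server_for_testing.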
+include GRPC::Spec::Helpers
 
 # GreeterServer is simple server that implements the Helloworld Greeter server.
 class EchoServerImpl < Echo::EchoServer::Service
@@ -46,7 +49,7 @@
   end
 
   def run
-    @srv = GRPC::RpcServer.new(@rpc_server_args)
+    @srv = new_rpc_server_for_testing(@rpc_server_args)
     port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     @srv.handle(@service_impl)
 
diff --git a/src/ruby/end2end/sig_handling_client.rb b/src/ruby/end2end/sig_handling_client.rb
index 129ad7c..6cd289a 100755
--- a/src/ruby/end2end/sig_handling_client.rb
+++ b/src/ruby/end2end/sig_handling_client.rb
@@ -66,7 +66,7 @@
 
   # The "shutdown" RPC should end very quickly.
   # Allow a few seconds to be safe.
-  srv = GRPC::RpcServer.new(poll_period: 3)
+  srv = new_rpc_server_for_testing(poll_period: 3)
   srv.add_http2_port("0.0.0.0:#{client_control_port}",
                      :this_port_is_insecure)
   stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index 9d2cf2a..c1a0c56 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -61,7 +61,7 @@
 ENV['EMBED_CARES'] = 'true'
 ENV['ARCH_FLAGS'] = RbConfig::CONFIG['ARCH_FLAG']
 ENV['ARCH_FLAGS'] = '-arch i386 -arch x86_64' if RUBY_PLATFORM =~ /darwin/
-ENV['CFLAGS'] = '-DGPR_BACKWARDS_COMPATIBILITY_MODE'
+ENV['CPPFLAGS'] = '-DGPR_BACKWARDS_COMPATIBILITY_MODE'
 
 output_dir = File.expand_path(RbConfig::CONFIG['topdir'])
 grpc_lib_dir = File.join(output_dir, 'libs', grpc_config)
diff --git a/src/ruby/spec/channel_connection_spec.rb b/src/ruby/spec/channel_connection_spec.rb
index ce3e3b1..5c31f41 100644
--- a/src/ruby/spec/channel_connection_spec.rb
+++ b/src/ruby/spec/channel_connection_spec.rb
@@ -16,9 +16,10 @@
 
 include Timeout
 include GRPC::Core
+include GRPC::Spec::Helpers
 
 def start_server(port = 0)
-  @srv = GRPC::RpcServer.new(pool_size: 1)
+  @srv = new_rpc_server_for_testing(pool_size: 1)
   server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure)
   @srv.handle(EchoService)
   @server_thd = Thread.new { @srv.run }
diff --git a/src/ruby/spec/client_auth_spec.rb b/src/ruby/spec/client_auth_spec.rb
index 79c9192..b955ad2 100644
--- a/src/ruby/spec/client_auth_spec.rb
+++ b/src/ruby/spec/client_auth_spec.rb
@@ -95,7 +95,7 @@
     server_opts = {
       poll_period: 1
     }
-    @srv = RpcServer.new(**server_opts)
+    @srv = new_rpc_server_for_testing(**server_opts)
     port = @srv.add_http2_port('0.0.0.0:0', create_server_creds)
     @srv.handle(SslTestService)
     @srv_thd = Thread.new { @srv.run }
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index adab8c9..14ad369 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -542,7 +542,7 @@
 describe 'the http client/server' do
   before(:example) do
     server_host = '0.0.0.0:0'
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
     @server.start
     @ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
@@ -574,7 +574,7 @@
     server_host = '0.0.0.0:0'
     server_creds = GRPC::Core::ServerCredentials.new(
       nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(server_host, server_creds)
     @server.start
     args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index 120acc3..135d1f2 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -40,7 +40,7 @@
   before(:each) do
     @pass_through = proc { |x| x }
     host = '0.0.0.0:0'
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(host, :this_port_is_insecure)
     @server.start
     @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb
index 9539e56..79eeca9 100644
--- a/src/ruby/spec/generic/client_stub_spec.rb
+++ b/src/ruby/spec/generic/client_stub_spec.rb
@@ -888,12 +888,12 @@
     secure_credentials = GRPC::Core::ServerCredentials.new(
       nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
 
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     @server.add_http2_port('0.0.0.0:0', secure_credentials)
   end
 
   def create_test_server
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
   end
 
diff --git a/src/ruby/spec/generic/interceptor_registry_spec.rb b/src/ruby/spec/generic/interceptor_registry_spec.rb
index f93f5ce..eb75d1e 100644
--- a/src/ruby/spec/generic/interceptor_registry_spec.rb
+++ b/src/ruby/spec/generic/interceptor_registry_spec.rb
@@ -14,7 +14,7 @@
 require 'spec_helper'
 
 describe GRPC::InterceptorRegistry do
-  let(:server) { RpcServer.new }
+  let(:server) { new_rpc_server_for_testing }
   let(:interceptor) { TestServerInterceptor.new }
   let(:interceptors) { [interceptor] }
   let(:registry) { described_class.new(interceptors) }
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index 05059fb..e072d0c 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -172,7 +172,7 @@
     it 'can be created with just some args' do
       opts = { server_args: { a_channel_arg: 'an_arg' } }
       blk = proc do
-        RpcServer.new(**opts)
+        new_rpc_server_for_testing(**opts)
       end
       expect(&blk).not_to raise_error
     end
@@ -183,7 +183,7 @@
           server_args: { a_channel_arg: 'an_arg' },
           creds: Object.new
         }
-        RpcServer.new(**opts)
+        new_rpc_server_for_testing(**opts)
       end
       expect(&blk).to raise_error
     end
@@ -192,7 +192,7 @@
   describe '#stopped?' do
     before(:each) do
       opts = { server_args: { a_channel_arg: 'an_arg' }, poll_period: 1.5 }
-      @srv = RpcServer.new(**opts)
+      @srv = new_rpc_server_for_testing(**opts)
       @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     end
 
@@ -224,7 +224,7 @@
       opts = {
         server_args: { a_channel_arg: 'an_arg' }
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       expect(r.running?).to be(false)
     end
 
@@ -233,7 +233,7 @@
         server_args: { a_channel_arg: 'an_arg' },
         poll_period: 2
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       r.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
       expect { r.run }.to raise_error(RuntimeError)
     end
@@ -243,7 +243,7 @@
         server_args: { a_channel_arg: 'an_arg' },
         poll_period: 2.5
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       r.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
       r.handle(EchoService)
       t = Thread.new { r.run }
@@ -257,7 +257,7 @@
   describe '#handle' do
     before(:each) do
       @opts = { server_args: { a_channel_arg: 'an_arg' }, poll_period: 1 }
-      @srv = RpcServer.new(**@opts)
+      @srv = new_rpc_server_for_testing(**@opts)
       @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     end
 
@@ -303,7 +303,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         server_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @host = "localhost:#{server_port}"
         @ch = GRPC::Core::Channel.new(@host, nil, :this_channel_is_insecure)
@@ -474,7 +474,7 @@
           poll_period: 1,
           max_waiting_requests: 1
         }
-        alt_srv = RpcServer.new(**opts)
+        alt_srv = new_rpc_server_for_testing(**opts)
         alt_srv.handle(SlowService)
         alt_port = alt_srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         alt_host = "0.0.0.0:#{alt_port}"
@@ -538,7 +538,7 @@
           poll_period: 1,
           connect_md_proc: test_md_proc
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
       end
@@ -573,7 +573,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
       end
@@ -624,7 +624,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
 
diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb
index 6f2a06b..3263589 100644
--- a/src/ruby/spec/google_rpc_status_utils_spec.rb
+++ b/src/ruby/spec/google_rpc_status_utils_spec.rb
@@ -19,6 +19,7 @@
 require 'google/protobuf/well_known_types'
 
 include GRPC::Core
+include GRPC::Spec::Helpers
 
 describe 'conversion from a status struct to a google protobuf status' do
   it 'fails if the input is not a status struct' do
@@ -150,7 +151,7 @@
 
 describe 'receving a google rpc status from a remote endpoint' do
   def start_server(encoded_rpc_status)
-    @srv = GRPC::RpcServer.new(pool_size: 1)
+    @srv = new_rpc_server_for_testing(pool_size: 1)
     @server_port = @srv.add_http2_port('localhost:0',
                                        :this_port_is_insecure)
     @srv.handle(GoogleRpcStatusTestService.new(encoded_rpc_status))
@@ -238,7 +239,7 @@
 
 describe 'when the endpoint doesnt send grpc-status-details-bin' do
   def start_server
-    @srv = GRPC::RpcServer.new(pool_size: 1)
+    @srv = new_rpc_server_for_testing(pool_size: 1)
     @server_port = @srv.add_http2_port('localhost:0',
                                        :this_port_is_insecure)
     @srv.handle(NoStatusDetailsBinTestService)
diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb
index c79ccfd..58a6023 100644
--- a/src/ruby/spec/pb/health/checker_spec.rb
+++ b/src/ruby/spec/pb/health/checker_spec.rb
@@ -192,7 +192,7 @@
       server_opts = {
         poll_period: 1
       }
-      @srv = RpcServer.new(**server_opts)
+      @srv = new_rpc_server_for_testing(**server_opts)
       server_port = @srv.add_http2_port(server_host, :this_port_is_insecure)
       @host = "localhost:#{server_port}"
       @ch = GRPC::Core::Channel.new(@host, nil, :this_channel_is_insecure)
diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb
index c0a5957..a0d27b6 100644
--- a/src/ruby/spec/server_spec.rb
+++ b/src/ruby/spec/server_spec.rb
@@ -30,12 +30,12 @@
 
   describe '#start' do
     it 'runs without failing' do
-      blk = proc { Server.new(nil).start }
+      blk = proc { new_core_server_for_testing(nil).start }
       expect(&blk).to_not raise_error
     end
 
     it 'fails if the server is closed' do
-      s = Server.new(nil)
+      s = new_core_server_for_testing(nil)
       s.close
       expect { s.start }.to raise_error(RuntimeError)
     end
@@ -85,7 +85,7 @@
     describe 'for insecure servers' do
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(nil)
+          s = new_core_server_for_testing(nil)
           s.add_http2_port('localhost:0', :this_port_is_insecure)
           s.close
         end
@@ -93,7 +93,7 @@
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(nil)
+        s = new_core_server_for_testing(nil)
         s.close
         blk = proc do
           s.add_http2_port('localhost:0', :this_port_is_insecure)
@@ -106,7 +106,7 @@
       let(:cert) { create_test_cert }
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(nil)
+          s = new_core_server_for_testing(nil)
           s.add_http2_port('localhost:0', cert)
           s.close
         end
@@ -114,7 +114,7 @@
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(nil)
+        s = new_core_server_for_testing(nil)
         s.close
         blk = proc { s.add_http2_port('localhost:0', cert) }
         expect(&blk).to raise_error(RuntimeError)
@@ -124,7 +124,7 @@
 
   shared_examples '#new' do
     it 'takes nil channel args' do
-      expect { Server.new(nil) }.to_not raise_error
+      expect { new_core_server_for_testing(nil) }.to_not raise_error
     end
 
     it 'does not take a hash with bad keys as channel args' do
@@ -175,14 +175,14 @@
 
   describe '#new with an insecure channel' do
     def construct_with_args(a)
-      proc { Server.new(a) }
+      proc { new_core_server_for_testing(a) }
     end
 
     it_behaves_like '#new'
   end
 
   def start_a_server
-    s = Server.new(nil)
+    s = new_core_server_for_testing(nil)
     s.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     s.start
     s
diff --git a/src/ruby/spec/support/helpers.rb b/src/ruby/spec/support/helpers.rb
index 65fffff..29028df 100644
--- a/src/ruby/spec/support/helpers.rb
+++ b/src/ruby/spec/support/helpers.rb
@@ -31,7 +31,7 @@
       #
       def build_rpc_server(server_opts: {},
                            client_opts: {})
-        @server = RpcServer.new({ poll_period: 1 }.merge(server_opts))
+        @server = new_rpc_server_for_testing({ poll_period: 1 }.merge(server_opts))
         @port = @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @host = "0.0.0.0:#{@port}"
         @client_opts = client_opts
@@ -68,6 +68,40 @@
         opts ||= @client_opts
         klass.new(host, :this_channel_is_insecure, **opts)
       end
+
+      ##
+      # Build an RPCServer for use in tests. Adds args
+      # that are useful for all tests.
+      #
+      # @param [Hash] server_opts
+      #
+      def new_rpc_server_for_testing(server_opts = {})
+        server_opts[:server_args] ||= {}
+        update_server_args_hash(server_opts[:server_args])
+        RpcServer.new(**server_opts)
+      end
+
+      ##
+      # Build a GRPC::Core::Server for use in tests. Adds args
+      # that are useful for all tests.
+      #
+      # @param [Hash] server_args
+      #
+      def new_core_server_for_testing(server_args)
+        server_args = {} if server_args.nil?
+        update_server_args_hash(server_args)
+        GRPC::Core::Server.new(server_args)
+      end
+
+      def update_server_args_hash(server_args)
+        so_reuseport_arg = 'grpc.so_reuseport'
+        unless server_args[so_reuseport_arg].nil?
+          fail 'Unexpected. grpc.so_reuseport already set.'
+        end
+        # Run tests without so_reuseport to eliminate the chance of
+        # cross-talk.
+        server_args[so_reuseport_arg] = 0
+      end
     end
   end
 end
diff --git a/templates/CMakeLists.txt.template b/templates/CMakeLists.txt.template
index f893710..5ef34e4 100644
--- a/templates/CMakeLists.txt.template
+++ b/templates/CMakeLists.txt.template
@@ -269,6 +269,7 @@
       add_subdirectory(<%text>${BORINGSSL_ROOT_DIR}</%text> third_party/boringssl)
       if(TARGET ssl)
         set(_gRPC_SSL_LIBRARIES ssl)
+        set(_gRPC_SSL_INCLUDE_DIR <%text>${BORINGSSL_ROOT_DIR}</%text>/include)
       endif()
     else()
         message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong")
@@ -280,7 +281,7 @@
   elseif("<%text>${gRPC_SSL_PROVIDER}</%text>" STREQUAL "package")
     find_package(OpenSSL REQUIRED)
     set(_gRPC_SSL_LIBRARIES <%text>${OPENSSL_LIBRARIES}</%text>)
-    include_directories(<%text>${OPENSSL_INCLUDE_DIR}</%text>)
+    set(_gRPC_SSL_INCLUDE_DIR <%text>${OPENSSL_INCLUDE_DIR}</%text>)
     set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n  find_package(OpenSSL)\nendif()")
   endif()
 
@@ -445,7 +446,7 @@
   % for lib in libs:
   % if lib.build in ["all", "protoc", "tool", "test", "private"] and not lib.boringssl:
   % if not lib.get('build_system', []) or 'cmake' in lib.get('build_system', []):
-  % if not lib.name in ['benchmark', 'z']:  # we build these using CMake instead
+  % if not lib.name in ['ares', 'benchmark', 'z']:  # we build these using CMake instead
   % if lib.build in ["test", "private"]:
   if (gRPC_BUILD_TESTS)
   ${cc_library(lib)}
@@ -515,7 +516,7 @@
   target_include_directories(${lib.name}
     PUBLIC <%text>$<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text>
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
-    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
     PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
     PRIVATE <%text>${ZLIB_INCLUDE_DIR}</%text>
     PRIVATE <%text>${BENCHMARK}</%text>/include
@@ -586,7 +587,7 @@
   target_include_directories(${tgt.name}
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include
-    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
     PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
     PRIVATE <%text>${BENCHMARK_ROOT_DIR}</%text>/include
     PRIVATE <%text>${ZLIB_ROOT_DIR}</%text>
diff --git a/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
index aacb3ec..e25791d 100644
--- a/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
+++ b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/test/core/end2end/dualstack_socket_test.cc b/test/core/end2end/dualstack_socket_test.cc
index ad2b24f..2ba1c17 100644
--- a/test/core/end2end/dualstack_socket_test.cc
+++ b/test/core/end2end/dualstack_socket_test.cc
@@ -29,7 +29,9 @@
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 
+#include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
 #include "src/core/lib/iomgr/socket_utils_posix.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
 #include "src/core/lib/support/string.h"
@@ -54,6 +56,21 @@
 
 static void do_nothing(void* ignored) {}
 
+static void log_resolved_addrs(const char* label, const char* hostname) {
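+  // Resolve the host (the port here is arbitrary) and log every address it
+  // maps to; useful when diagnosing dual-stack resolution failures.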
+  grpc_resolved_addresses* res = nullptr;
+  grpc_error* error = grpc_blocking_resolve_address(hostname, "80", &res);
+  if (error != GRPC_ERROR_NONE || res == nullptr) {
+    GRPC_LOG_IF_ERROR(hostname, error);
+    return;
+  }
+  for (size_t i = 0; i < res->naddrs; ++i) {
+    char* addr_str = grpc_sockaddr_to_uri(&res->addrs[i]);
+    gpr_log(GPR_INFO, "%s: %s", label, addr_str);
+    gpr_free(addr_str);
+  }
+  grpc_resolved_addresses_destroy(res);
+}
+
 void test_connect(const char* server_host, const char* client_host, int port,
                   int expect_ok) {
   char* client_hostport;
@@ -140,6 +157,8 @@
 
   gpr_log(GPR_INFO, "Testing with server=%s client=%s (expecting %s)",
           server_hostport, client_hostport, expect_ok ? "success" : "failure");
+  log_resolved_addrs("server resolved addr", server_host);
+  log_resolved_addrs("client resolved addr", client_host);
 
   gpr_free(client_hostport);
   gpr_free(server_hostport);
@@ -236,6 +255,8 @@
     CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
     cq_verify(cqv);
 
+    gpr_log(GPR_INFO, "status: %d (expected: %d)", status,
+            GRPC_STATUS_UNAVAILABLE);
     GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
   }
 
diff --git a/test/core/support/cpu_test.cc b/test/core/support/cpu_test.cc
index 6e04fee..334c431 100644
--- a/test/core/support/cpu_test.cc
+++ b/test/core/support/cpu_test.cc
@@ -115,7 +115,7 @@
   }
   gpr_mu_lock(&ct.mu);
   while (!ct.is_done) {
-    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&ct.mu);
   fprintf(stderr, "Saw cores [");
diff --git a/test/core/support/sync_test.cc b/test/core/support/sync_test.cc
index 3f534de..fb7ec44 100644
--- a/test/core/support/sync_test.cc
+++ b/test/core/support/sync_test.cc
@@ -73,7 +73,7 @@
      corresponding condition variable.  The predicate must be on state
      protected by the lock.  */
   while (q->length == N) {
-    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
     /* It's normal to use gpr_cv_broadcast() or gpr_signal() while
@@ -197,7 +197,7 @@
 static void test_wait(struct test* m) {
   gpr_mu_lock(&m->mu);
   while (m->done != 0) {
-    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&m->mu);
 }
@@ -297,7 +297,7 @@
   for (i = 0; i != m->iterations; i++) {
     gpr_mu_lock(&m->mu);
     while ((m->counter % m->threads) != id) {
-      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
     }
     m->counter++;
     gpr_cv_broadcast(&m->cv);
@@ -314,7 +314,7 @@
   for (i = 0; i != m->iterations; i++) {
     gpr_timespec deadline;
     gpr_mu_lock(&m->mu);
-    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+    deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                             gpr_time_from_micros(1000, GPR_TIMESPAN));
     while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
     }
@@ -370,14 +370,14 @@
   int64_t i;
   int value;
   for (i = 0; i != n; i++) {
-    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_REALTIME));
+    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_lock(&m->mu);
   m->counter = n;
   gpr_mu_unlock(&m->mu);
   GPR_ASSERT(
       !queue_remove(&m->q, &value,
-                    gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                  gpr_time_from_micros(1000000, GPR_TIMESPAN))));
   mark_thread_done(m);
 }
diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD
index b29a13d..265f754 100644
--- a/test/cpp/end2end/BUILD
+++ b/test/cpp/end2end/BUILD
@@ -14,7 +14,7 @@
 
 licenses(["notice"])  # Apache v2
 
-load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package")
+load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package", "grpc_cc_binary")
 
 grpc_package(name = "test/cpp/end2end", visibility = "public") # Allows external users to implement end2end tests.
 
@@ -66,12 +66,15 @@
         "//test/core/util:grpc_test_util",
         "//test/cpp/util:test_util",
     ],
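+    # The test spawns this server binary at run time, so ship it as data.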
+    data = [
+        ":client_crash_test_server",
+    ],
     external_deps = [
         "gtest",
     ],
 )
 
-grpc_cc_test(
+grpc_cc_binary(
     name = "client_crash_test_server",
     srcs = ["client_crash_test_server.cc"],
     deps = [
@@ -301,9 +304,12 @@
     external_deps = [
         "gtest",
     ],
+    data = [
+        ":server_crash_test_client",
+    ],
 )
 
-grpc_cc_test(
+grpc_cc_binary(
     name = "server_crash_test_client",
     srcs = ["server_crash_test_client.cc"],
     deps = [
diff --git a/test/cpp/util/cli_call.cc b/test/cpp/util/cli_call.cc
index c3220ef..4f1a20c 100644
--- a/test/cpp/util/cli_call.cc
+++ b/test/cpp/util/cli_call.cc
@@ -126,7 +126,7 @@
   call_->Write(send_buffer, tag(2));
   write_done_ = false;
   while (!write_done_) {
-    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&write_mu_);
 }
@@ -136,7 +136,7 @@
   call_->WritesDone(tag(4));
   write_done_ = false;
   while (!write_done_) {
-    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&write_mu_);
 }
diff --git a/test/cpp/util/cli_credentials.cc b/test/cpp/util/cli_credentials.cc
index f1f43f8..aa4eafb 100644
--- a/test/cpp/util/cli_credentials.cc
+++ b/test/cpp/util/cli_credentials.cc
@@ -22,27 +22,43 @@
 
 DEFINE_bool(enable_ssl, false, "Whether to use ssl/tls.");
 DEFINE_bool(use_auth, false, "Whether to create default google credentials.");
+DEFINE_string(
+    access_token, "",
+    "The access token that will be sent to the server to authenticate RPCs.");
 
 namespace grpc {
 namespace testing {
 
 std::shared_ptr<grpc::ChannelCredentials> CliCredentials::GetCredentials()
     const {
-  if (!FLAGS_enable_ssl) {
-    return grpc::InsecureChannelCredentials();
-  } else {
+  if (!FLAGS_access_token.empty()) {
     if (FLAGS_use_auth) {
-      return grpc::GoogleDefaultCredentials();
-    } else {
-      return grpc::SslCredentials(grpc::SslCredentialsOptions());
+      fprintf(stderr,
+              "warning: use_auth is ignored when access_token is provided.");
     }
+
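+    // Compose SSL channel credentials with the access token as call credentials.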
+    return grpc::CompositeChannelCredentials(
+        grpc::SslCredentials(grpc::SslCredentialsOptions()),
+        grpc::AccessTokenCredentials(FLAGS_access_token));
   }
+
+  if (FLAGS_use_auth) {
+    return grpc::GoogleDefaultCredentials();
+  }
+
+  if (FLAGS_enable_ssl) {
+    return grpc::SslCredentials(grpc::SslCredentialsOptions());
+  }
+
+  return grpc::InsecureChannelCredentials();
 }
 
 const grpc::string CliCredentials::GetCredentialUsage() const {
   return "    --enable_ssl             ; Set whether to use tls\n"
          "    --use_auth               ; Set whether to create default google"
-         " credentials\n";
+         " credentials\n"
+         "    --access_token           ; Set the access token in metadata,"
+         " overrides --use_auth\n";
 }
 }  // namespace testing
 }  // namespace grpc
diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc
index a6d08cd..30c43b2 100644
--- a/test/cpp/util/grpc_tool.cc
+++ b/test/cpp/util/grpc_tool.cc
@@ -124,13 +124,32 @@
     return;
   }
   std::vector<grpc::string> fields;
-  const char* delim = ":";
-  size_t cur, next = -1;
-  do {
-    cur = next + 1;
-    next = FLAGS_metadata.find_first_of(delim, cur);
-    fields.push_back(FLAGS_metadata.substr(cur, next - cur));
-  } while (next != grpc::string::npos);
+  const char delim = ':';
+  const char escape = '\\';
+  size_t cur = -1;
+  std::stringstream ss;
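+  // Scan the flag one character at a time: '\' escapes a following ':' or
+  // '\', an unescaped ':' ends the current field, and anything else is
+  // copied through verbatim.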
+  while (++cur < FLAGS_metadata.length()) {
+    switch (FLAGS_metadata.at(cur)) {
+      case escape:
+        if (cur < FLAGS_metadata.length() - 1) {
+          char c = FLAGS_metadata.at(++cur);
+          if (c == delim || c == escape) {
+            ss << c;
+            continue;
+          }
+        }
+        fprintf(stderr, "Failed to parse metadata flag.\n");
+        exit(1);
+      case delim:
+        fields.push_back(ss.str());
+        ss.str("");
+        ss.clear();
+        break;
+      default:
+        ss << FLAGS_metadata.at(cur);
+    }
+  }
+  fields.push_back(ss.str());
   if (fields.size() % 2) {
     fprintf(stderr, "Failed to parse metadata flag.\n");
     exit(1);
diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc
index 1c07b2a..0b599f4 100644
--- a/test/cpp/util/grpc_tool_test.cc
+++ b/test/cpp/util/grpc_tool_test.cc
@@ -85,6 +85,8 @@
 DECLARE_bool(binary_output);
 DECLARE_bool(l);
 DECLARE_bool(batch);
+DECLARE_string(metadata);
+DECLARE_string(protofiles);
 
 namespace {
 
@@ -618,6 +620,8 @@
   // Expected output: ECHO_RESPONSE_MESSAGE
   EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE));
 
+  FLAGS_binary_input = false;
+  FLAGS_binary_output = false;
   ShutdownServer();
 }
 
@@ -652,6 +656,84 @@
   EXPECT_TRUE(0 == output_stream.tellp());
 }
 
+TEST_F(GrpcToolTest, CallCommandWithMetadata) {
+  // Test input "grpc_cli call localhost:<port> Echo "message: 'Hello'"
+  const grpc::string server_address = SetUpServer();
+  const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo",
+                        "message: 'Hello'"};
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key0:val0:key1:valq:key2:val2";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\:val";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\\\val";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  FLAGS_metadata = "";
+  ShutdownServer();
+}
+
+TEST_F(GrpcToolTest, CallCommandWithBadMetadata) {
+  // Test input "grpc_cli call localhost:10000 Echo "message: 'Hello'"
+  const char* argv[] = {"grpc_cli", "call", "localhost:10000", "Echo",
+                        "message: 'Hello'"};
+  FLAGS_protofiles = "src/proto/grpc/testing/echo.proto";
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key0:val0:key1";
+    // Exit with 1
+    EXPECT_EXIT(
+        GrpcToolMainLib(
+            ArraySize(argv), argv, TestCliCredentials(),
+            std::bind(PrintStream, &output_stream, std::placeholders::_1)),
+        ::testing::ExitedWithCode(1), ".*Failed to parse metadata flag.*");
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\val";
+    // Exit with 1
+    EXPECT_EXIT(
+        GrpcToolMainLib(
+            ArraySize(argv), argv, TestCliCredentials(),
+            std::bind(PrintStream, &output_stream, std::placeholders::_1)),
+        ::testing::ExitedWithCode(1), ".*Failed to parse metadata flag.*");
+  }
+
+  FLAGS_metadata = "";
+  FLAGS_protofiles = "";
+}
+
 }  // namespace testing
 }  // namespace grpc
 
diff --git a/third_party/BUILD b/third_party/BUILD
new file mode 100644
index 0000000..dea1229
--- /dev/null
+++ b/third_party/BUILD
@@ -0,0 +1,6 @@
+exports_files([
+    "benchmark.BUILD",
+    "gtest.BUILD",
+    "objective_c/Cronet/bidirectional_stream_c.h",
+    "zlib.BUILD",
+])
diff --git a/third_party/cares/BUILD b/third_party/cares/BUILD
new file mode 100644
index 0000000..ad27d93
--- /dev/null
+++ b/third_party/cares/BUILD
@@ -0,0 +1,9 @@
+exports_files([
+    "ares_build.h",
+    "cares.BUILD",
+    "config_android/ares_config.h",
+    "config_darwin/ares_config.h",
+    "config_freebsd/ares_config.h",
+    "config_linux/ares_config.h",
+    "config_openbsd/ares_config.h",
+])
diff --git a/third_party/cares/cares.BUILD b/third_party/cares/cares.BUILD
index 85ca506..3ac0287 100644
--- a/third_party/cares/cares.BUILD
+++ b/third_party/cares/cares.BUILD
@@ -35,33 +35,27 @@
 )
 
 genrule(
-    name = "ares_build",
-    srcs = ["@cares_local_files//:ares_build_h"],
+    name = "ares_build_h",
+    srcs = ["@com_github_grpc_grpc//third_party/cares:ares_build.h"],
     outs = ["ares_build.h"],
-    cmd = "cat $(location @cares_local_files//:ares_build_h) > $@",
+    cmd = "cat $< > $@",
 )
 
-# cc_library(
-#     name = "ares_build_h",
-#     hdrs = ["ares_build.h"],
-#     data = [":ares_build"],
-#     includes = ["."],
-# )
-
 genrule(
-    name = "ares_config",
-    srcs = ["@cares_local_files//:ares_config_h"],
+    name = "ares_config_h",
+    srcs = select({
+        ":ios_x86_64": ["@com_github_grpc_grpc//third_party/cares:config_darwin/ares_config.h"],
+        ":ios_armv7": ["@com_github_grpc_grpc//third_party/cares:config_darwin/ares_config.h"],
+        ":ios_armv7s": ["@com_github_grpc_grpc//third_party/cares:config_darwin/ares_config.h"],
+        ":ios_arm64": ["@com_github_grpc_grpc//third_party/cares:config_darwin/ares_config.h"],
+        ":darwin": ["@com_github_grpc_grpc//third_party/cares:config_darwin/ares_config.h"],
+        ":android": ["@com_github_grpc_grpc//third_party/cares:config_android/ares_config.h"],
+        "//conditions:default": ["@com_github_grpc_grpc//third_party/cares:config_linux/ares_config.h"],
+    }),
     outs = ["ares_config.h"],
-    cmd = "cat $(location @cares_local_files//:ares_config_h) > $@",
+    cmd = "cat $< > $@",
 )
 
-# cc_library(
-#     name = "ares_config_h",
-#     hdrs = ["ares_config.h"],
-#     data = [":ares_config"],
-#     includes = ["."],
-# )
-
 cc_library(
     name = "ares",
     srcs = [
@@ -147,10 +141,6 @@
         "-DNOMINMAX",
         "-DHAVE_CONFIG_H",
     ],
-    data = [
-        ":ares_build",
-        ":ares_config",
-    ],
     includes = ["."],
     linkstatic = 1,
     visibility = [
diff --git a/third_party/cares/cares_local_files.BUILD b/third_party/cares/cares_local_files.BUILD
deleted file mode 100644
index fe59447..0000000
--- a/third_party/cares/cares_local_files.BUILD
+++ /dev/null
@@ -1,57 +0,0 @@
-package(
-    default_visibility = ["//visibility:public"],
-)
-
-config_setting(
-    name = "darwin",
-    values = {"cpu": "darwin"},
-)
-
-# Android is not officially supported through C++.
-# This just helps with the build for now.
-config_setting(
-    name = "android",
-    values = {
-        "crosstool_top": "//external:android/crosstool",
-    },
-)
-
-# iOS is not officially supported through C++.
-# This just helps with the build for now.
-config_setting(
-    name = "ios_x86_64",
-    values = {"cpu": "ios_x86_64"},
-)
-
-config_setting(
-    name = "ios_armv7",
-    values = {"cpu": "ios_armv7"},
-)
-
-config_setting(
-    name = "ios_armv7s",
-    values = {"cpu": "ios_armv7s"},
-)
-
-config_setting(
-    name = "ios_arm64",
-    values = {"cpu": "ios_arm64"},
-)
-
-filegroup(
-    name = "ares_build_h",
-    srcs = ["ares_build.h"],
-)
-
-filegroup(
-    name = "ares_config_h",
-    srcs = select({
-        ":ios_x86_64": ["config_darwin/ares_config.h"],
-        ":ios_armv7": ["config_darwin/ares_config.h"],
-        ":ios_armv7s": ["config_darwin/ares_config.h"],
-        ":ios_arm64": ["config_darwin/ares_config.h"],
-        ":darwin": ["config_darwin/ares_config.h"],
-        ":android": ["config_android/ares_config.h"],
-        "//conditions:default": ["config_linux/ares_config.h"],
-    }),
-)
diff --git a/tools/buildgen/build-cleaner.py b/tools/buildgen/build-cleaner.py
index 7b42844..a6b86fb 100755
--- a/tools/buildgen/build-cleaner.py
+++ b/tools/buildgen/build-cleaner.py
@@ -22,65 +22,65 @@
 
 TEST = (os.environ.get('TEST', 'false') == 'true')
 
-_TOP_LEVEL_KEYS = ['settings', 'proto_deps', 'filegroups', 'libs', 'targets', 'vspackages']
+_TOP_LEVEL_KEYS = [
+    'settings', 'proto_deps', 'filegroups', 'libs', 'targets', 'vspackages'
+]
 _ELEM_KEYS = [
-    'name',
-    'gtest',
-    'cpu_cost',
-    'flaky',
-    'build',
-    'run',
-    'language',
-    'public_headers',
-    'headers',
-    'src',
-    'deps']
+    'name', 'gtest', 'cpu_cost', 'flaky', 'build', 'run', 'language',
+    'public_headers', 'headers', 'src', 'deps'
+]
+
 
 def repr_ordered_dict(dumper, odict):
-  return dumper.represent_mapping(u'tag:yaml.org,2002:map', odict.items())
+    return dumper.represent_mapping(u'tag:yaml.org,2002:map', odict.items())
+
 
 yaml.add_representer(collections.OrderedDict, repr_ordered_dict)
 
+
 def rebuild_as_ordered_dict(indict, special_keys):
-  outdict = collections.OrderedDict()
-  for key in sorted(indict.keys()):
-    if '#' in key:
-      outdict[key] = indict[key]
-  for key in special_keys:
-    if key in indict:
-      outdict[key] = indict[key]
-  for key in sorted(indict.keys()):
-    if key in special_keys: continue
-    if '#' in key: continue
-    outdict[key] = indict[key]
-  return outdict
+    outdict = collections.OrderedDict()
+    for key in sorted(indict.keys()):
+        if '#' in key:
+            outdict[key] = indict[key]
+    for key in special_keys:
+        if key in indict:
+            outdict[key] = indict[key]
+    for key in sorted(indict.keys()):
+        if key in special_keys: continue
+        if '#' in key: continue
+        outdict[key] = indict[key]
+    return outdict
+
 
 def clean_elem(indict):
-  for name in ['public_headers', 'headers', 'src']:
-    if name not in indict: continue
-    inlist = indict[name]
-    protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
-    others = set(x for x in inlist if x not in protos)
-    indict[name] = protos + sorted(others)
-  return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
+    for name in ['public_headers', 'headers', 'src']:
+        if name not in indict: continue
+        inlist = indict[name]
+        protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
+        others = set(x for x in inlist if x not in protos)
+        indict[name] = protos + sorted(others)
+    return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
+
 
 for filename in sys.argv[1:]:
-  with open(filename) as f:
-    js = yaml.load(f)
-  js = rebuild_as_ordered_dict(js, _TOP_LEVEL_KEYS)
-  for grp in ['filegroups', 'libs', 'targets']:
-    if grp not in js: continue
-    js[grp] = sorted([clean_elem(x) for x in js[grp]],
-                     key=lambda x: (x.get('language', '_'), x['name']))
-  output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
-  # massage out trailing whitespace
-  lines = []
-  for line in output.splitlines():
-    lines.append(line.rstrip() + '\n')
-  output = ''.join(lines)
-  if TEST:
     with open(filename) as f:
-      assert f.read() == output
-  else:
-    with open(filename, 'w') as f:
-      f.write(output)
+        js = yaml.load(f)
+    js = rebuild_as_ordered_dict(js, _TOP_LEVEL_KEYS)
+    for grp in ['filegroups', 'libs', 'targets']:
+        if grp not in js: continue
+        js[grp] = sorted(
+            [clean_elem(x) for x in js[grp]],
+            key=lambda x: (x.get('language', '_'), x['name']))
+    output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
+    # massage out trailing whitespace
+    lines = []
+    for line in output.splitlines():
+        lines.append(line.rstrip() + '\n')
+    output = ''.join(lines)
+    if TEST:
+        with open(filename) as f:
+            assert f.read() == output
+    else:
+        with open(filename, 'w') as f:
+            f.write(output)
diff --git a/tools/buildgen/bunch.py b/tools/buildgen/bunch.py
index 813051a..0ce9a6b 100755
--- a/tools/buildgen/bunch.py
+++ b/tools/buildgen/bunch.py
@@ -11,43 +11,42 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Allows dot-accessible dictionaries."""
 
 
 class Bunch(dict):
 
-  def __init__(self, d):
-    dict.__init__(self, d)
-    self.__dict__.update(d)
+    def __init__(self, d):
+        dict.__init__(self, d)
+        self.__dict__.update(d)
 
 
 # Converts any kind of variable to a Bunch
 def to_bunch(var):
-  if isinstance(var, list):
-    return [to_bunch(i) for i in var]
-  if isinstance(var, dict):
-    ret = {}
-    for k, v in var.items():
-      if isinstance(v, (list, dict)):
-        v = to_bunch(v)
-      ret[k] = v
-    return Bunch(ret)
-  else:
-    return var
+    if isinstance(var, list):
+        return [to_bunch(i) for i in var]
+    if isinstance(var, dict):
+        ret = {}
+        for k, v in var.items():
+            if isinstance(v, (list, dict)):
+                v = to_bunch(v)
+            ret[k] = v
+        return Bunch(ret)
+    else:
+        return var
 
 
 # Merges JSON 'add' into JSON 'dst'
 def merge_json(dst, add):
-  if isinstance(dst, dict) and isinstance(add, dict):
-    for k, v in add.items():
-      if k in dst:
-        if k == '#': continue
-        merge_json(dst[k], v)
-      else:
-        dst[k] = v
-  elif isinstance(dst, list) and isinstance(add, list):
-    dst.extend(add)
-  else:
-    raise Exception('Tried to merge incompatible objects %s %s\n\n%r\n\n%r' % (type(dst).__name__, type(add).__name__, dst, add))
-
+    if isinstance(dst, dict) and isinstance(add, dict):
+        for k, v in add.items():
+            if k in dst:
+                if k == '#': continue
+                merge_json(dst[k], v)
+            else:
+                dst[k] = v
+    elif isinstance(dst, list) and isinstance(add, list):
+        dst.extend(add)
+    else:
+        raise Exception('Tried to merge incompatible objects %s %s\n\n%r\n\n%r'
+                        % (type(dst).__name__, type(add).__name__, dst, add))
diff --git a/tools/buildgen/generate_projects.py b/tools/buildgen/generate_projects.py
index d29cd02..bb5de9c 100755
--- a/tools/buildgen/generate_projects.py
+++ b/tools/buildgen/generate_projects.py
@@ -21,7 +21,9 @@
 import sys
 import tempfile
 import multiprocessing
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
+sys.path.append(
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
 
 assert sys.argv[1:], 'run generate_projects.sh instead of this directly'
 
@@ -45,57 +47,58 @@
 
 templates = args.templates
 if not templates:
-  for root, dirs, files in os.walk('templates'):
-    for f in files:
-      templates.append(os.path.join(root, f))
+    for root, dirs, files in os.walk('templates'):
+        for f in files:
+            templates.append(os.path.join(root, f))
 
 pre_jobs = []
 base_cmd = ['python2.7', 'tools/buildgen/mako_renderer.py']
 cmd = base_cmd[:]
 for plugin in plugins:
-  cmd.append('-p')
-  cmd.append(plugin)
+    cmd.append('-p')
+    cmd.append(plugin)
 for js in json:
-  cmd.append('-d')
-  cmd.append(js)
+    cmd.append('-d')
+    cmd.append(js)
 cmd.append('-w')
 preprocessed_build = '.preprocessed_build'
 cmd.append(preprocessed_build)
 if args.output_merged is not None:
-  cmd.append('-M')
-  cmd.append(args.output_merged)
-pre_jobs.append(jobset.JobSpec(cmd, shortname='preprocess', timeout_seconds=None))
+    cmd.append('-M')
+    cmd.append(args.output_merged)
+pre_jobs.append(
+    jobset.JobSpec(cmd, shortname='preprocess', timeout_seconds=None))
 
 jobs = []
 for template in reversed(sorted(templates)):
-  root, f = os.path.split(template)
-  if os.path.splitext(f)[1] == '.template':
-    out_dir = args.base + root[len('templates'):]
-    out = out_dir + '/' + os.path.splitext(f)[0]
-    if not os.path.exists(out_dir):
-      os.makedirs(out_dir)
-    cmd = base_cmd[:]
-    cmd.append('-P')
-    cmd.append(preprocessed_build)
-    cmd.append('-o')
-    if test is None:
-      cmd.append(out)
-    else:
-      tf = tempfile.mkstemp()
-      test[out] = tf[1]
-      os.close(tf[0])
-      cmd.append(test[out])
-    cmd.append(args.base + '/' + root + '/' + f)
-    jobs.append(jobset.JobSpec(cmd, shortname=out, timeout_seconds=None))
+    root, f = os.path.split(template)
+    if os.path.splitext(f)[1] == '.template':
+        out_dir = args.base + root[len('templates'):]
+        out = out_dir + '/' + os.path.splitext(f)[0]
+        if not os.path.exists(out_dir):
+            os.makedirs(out_dir)
+        cmd = base_cmd[:]
+        cmd.append('-P')
+        cmd.append(preprocessed_build)
+        cmd.append('-o')
+        if test is None:
+            cmd.append(out)
+        else:
+            tf = tempfile.mkstemp()
+            test[out] = tf[1]
+            os.close(tf[0])
+            cmd.append(test[out])
+        cmd.append(args.base + '/' + root + '/' + f)
+        jobs.append(jobset.JobSpec(cmd, shortname=out, timeout_seconds=None))
 
 jobset.run(pre_jobs, maxjobs=args.jobs)
 jobset.run(jobs, maxjobs=args.jobs)
 
 if test is not None:
-  for s, g in test.iteritems():
-    if os.path.isfile(g):
-      assert 0 == os.system('diff %s %s' % (s, g)), s
-      os.unlink(g)
-    else:
-      assert 0 == os.system('diff -r %s %s' % (s, g)), s
-      shutil.rmtree(g, ignore_errors=True)
+    for s, g in test.iteritems():
+        if os.path.isfile(g):
+            assert 0 == os.system('diff %s %s' % (s, g)), s
+            os.unlink(g)
+        else:
+            assert 0 == os.system('diff -r %s %s' % (s, g)), s
+            shutil.rmtree(g, ignore_errors=True)
diff --git a/tools/buildgen/mako_renderer.py b/tools/buildgen/mako_renderer.py
index 7738053..0569fa1 100755
--- a/tools/buildgen/mako_renderer.py
+++ b/tools/buildgen/mako_renderer.py
@@ -12,8 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
 """Simple Mako renderer.
 
 Just a wrapper around the mako rendering library.
@@ -27,7 +25,6 @@
 import shutil
 import sys
 
-
 from mako.lookup import TemplateLookup
 from mako.runtime import Context
 from mako.template import Template
@@ -37,151 +34,158 @@
 
 # Imports a plugin
 def import_plugin(name):
-  _, base_ex = os.path.split(name)
-  base, _ = os.path.splitext(base_ex)
+    _, base_ex = os.path.split(name)
+    base, _ = os.path.splitext(base_ex)
 
-  with open(name, 'r') as plugin_file:
-    plugin_code = plugin_file.read()
-  plugin_module = imp.new_module(base)
-  exec plugin_code in plugin_module.__dict__
-  return plugin_module
+    with open(name, 'r') as plugin_file:
+        plugin_code = plugin_file.read()
+    plugin_module = imp.new_module(base)
+    exec plugin_code in plugin_module.__dict__
+    return plugin_module
 
 
 def out(msg):
-  print >> sys.stderr, msg
+    print >> sys.stderr, msg
 
 
 def showhelp():
-  out('mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict] [-d dict...]'
-      ' [-t template] [-w preprocessed_output]')
+    out('mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict] [-d dict...]'
+        ' [-t template] [-w preprocessed_output]')
 
 
 def main(argv):
-  got_input = False
-  module_directory = None
-  preprocessed_output = None
-  dictionary = {}
-  json_dict = {}
-  got_output = False
-  plugins = []
-  output_name = None
-  got_preprocessed_input = False
-  output_merged = None
+    got_input = False
+    module_directory = None
+    preprocessed_output = None
+    dictionary = {}
+    json_dict = {}
+    got_output = False
+    plugins = []
+    output_name = None
+    got_preprocessed_input = False
+    output_merged = None
 
-  try:
-    opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
-  except getopt.GetoptError:
-    out('Unknown option')
-    showhelp()
-    sys.exit(2)
-
-  for opt, arg in opts:
-    if opt == '-h':
-      out('Displaying showhelp')
-      showhelp()
-      sys.exit()
-    elif opt == '-o':
-      if got_output:
-        out('Got more than one output')
+    try:
+        opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
+    except getopt.GetoptError:
+        out('Unknown option')
         showhelp()
-        sys.exit(3)
-      got_output = True
-      output_name = arg
-    elif opt == '-m':
-      if module_directory is not None:
-        out('Got more than one cache directory')
+        sys.exit(2)
+
+    for opt, arg in opts:
+        if opt == '-h':
+            out('Displaying showhelp')
+            showhelp()
+            sys.exit()
+        elif opt == '-o':
+            if got_output:
+                out('Got more than one output')
+                showhelp()
+                sys.exit(3)
+            got_output = True
+            output_name = arg
+        elif opt == '-m':
+            if module_directory is not None:
+                out('Got more than one cache directory')
+                showhelp()
+                sys.exit(4)
+            module_directory = arg
+        elif opt == '-M':
+            if output_merged is not None:
+                out('Got more than one output merged path')
+                showhelp()
+                sys.exit(5)
+            output_merged = arg
+        elif opt == '-P':
+            assert not got_preprocessed_input
+            assert json_dict == {}
+            sys.path.insert(
+                0,
+                os.path.abspath(
+                    os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
+            with open(arg, 'r') as dict_file:
+                dictionary = pickle.load(dict_file)
+            got_preprocessed_input = True
+        elif opt == '-d':
+            assert not got_preprocessed_input
+            with open(arg, 'r') as dict_file:
+                bunch.merge_json(json_dict, yaml.load(dict_file.read()))
+        elif opt == '-p':
+            plugins.append(import_plugin(arg))
+        elif opt == '-w':
+            preprocessed_output = arg
+
+    if not got_preprocessed_input:
+        for plugin in plugins:
+            plugin.mako_plugin(json_dict)
+        if output_merged:
+            with open(output_merged, 'w') as yaml_file:
+                yaml_file.write(yaml.dump(json_dict))
+        for k, v in json_dict.items():
+            dictionary[k] = bunch.to_bunch(v)
+
+    if preprocessed_output:
+        with open(preprocessed_output, 'w') as dict_file:
+            pickle.dump(dictionary, dict_file)
+
+    cleared_dir = False
+    for arg in args:
+        got_input = True
+        with open(arg) as f:
+            srcs = list(yaml.load_all(f.read()))
+        for src in srcs:
+            if isinstance(src, basestring):
+                assert len(srcs) == 1
+                template = Template(
+                    src,
+                    filename=arg,
+                    module_directory=module_directory,
+                    lookup=TemplateLookup(directories=['.']))
+                with open(output_name, 'w') as output_file:
+                    template.render_context(Context(output_file, **dictionary))
+            else:
+                # we have optional control data: this template represents
+                # a directory
+                if not cleared_dir:
+                    if not os.path.exists(output_name):
+                        pass
+                    elif os.path.isfile(output_name):
+                        os.unlink(output_name)
+                    else:
+                        shutil.rmtree(output_name, ignore_errors=True)
+                    cleared_dir = True
+                items = []
+                if 'foreach' in src:
+                    for el in dictionary[src['foreach']]:
+                        if 'cond' in src:
+                            args = dict(dictionary)
+                            args['selected'] = el
+                            if not eval(src['cond'], {}, args):
+                                continue
+                        items.append(el)
+                    assert items
+                else:
+                    items = [None]
+                for item in items:
+                    args = dict(dictionary)
+                    args['selected'] = item
+                    item_output_name = os.path.join(
+                        output_name,
+                        Template(src['output_name']).render(**args))
+                    if not os.path.exists(os.path.dirname(item_output_name)):
+                        os.makedirs(os.path.dirname(item_output_name))
+                    template = Template(
+                        src['template'],
+                        filename=arg,
+                        module_directory=module_directory,
+                        lookup=TemplateLookup(directories=['.']))
+                    with open(item_output_name, 'w') as output_file:
+                        template.render_context(Context(output_file, **args))
+
+    if not got_input and not preprocessed_output:
+        out('Got nothing to do')
         showhelp()
-        sys.exit(4)
-      module_directory = arg
-    elif opt == '-M':
-      if output_merged is not None:
-        out('Got more than one output merged path')
-        showhelp()
-        sys.exit(5)
-      output_merged = arg
-    elif opt == '-P':
-      assert not got_preprocessed_input
-      assert json_dict == {}
-      sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
-      with open(arg, 'r') as dict_file:
-        dictionary = pickle.load(dict_file)
-      got_preprocessed_input = True
-    elif opt == '-d':
-      assert not got_preprocessed_input
-      with open(arg, 'r') as dict_file:
-        bunch.merge_json(json_dict, yaml.load(dict_file.read()))
-    elif opt == '-p':
-      plugins.append(import_plugin(arg))
-    elif opt == '-w':
-      preprocessed_output = arg
 
-  if not got_preprocessed_input:
-    for plugin in plugins:
-      plugin.mako_plugin(json_dict)
-    if output_merged:
-      with open(output_merged, 'w') as yaml_file:
-        yaml_file.write(yaml.dump(json_dict))
-    for k, v in json_dict.items():
-      dictionary[k] = bunch.to_bunch(v)
-
-  if preprocessed_output:
-    with open(preprocessed_output, 'w') as dict_file:
-      pickle.dump(dictionary, dict_file)
-
-  cleared_dir = False
-  for arg in args:
-    got_input = True
-    with open(arg) as f:
-      srcs = list(yaml.load_all(f.read()))
-    for src in srcs:
-      if isinstance(src, basestring):
-        assert len(srcs) == 1
-        template = Template(src,
-                            filename=arg,
-                            module_directory=module_directory,
-                            lookup=TemplateLookup(directories=['.']))
-        with open(output_name, 'w') as output_file:
-          template.render_context(Context(output_file, **dictionary))
-      else:
-        # we have optional control data: this template represents
-        # a directory
-        if not cleared_dir:
-          if not os.path.exists(output_name):
-            pass
-          elif os.path.isfile(output_name):
-            os.unlink(output_name)
-          else:
-            shutil.rmtree(output_name, ignore_errors=True)
-          cleared_dir = True
-        items = []
-        if 'foreach' in src:
-          for el in dictionary[src['foreach']]:
-            if 'cond' in src:
-              args = dict(dictionary)
-              args['selected'] = el
-              if not eval(src['cond'], {}, args):
-                continue
-            items.append(el)
-          assert items
-        else:
-          items = [None]
-        for item in items:
-          args = dict(dictionary)
-          args['selected'] = item
-          item_output_name = os.path.join(
-              output_name, Template(src['output_name']).render(**args))
-          if not os.path.exists(os.path.dirname(item_output_name)):
-            os.makedirs(os.path.dirname(item_output_name))
-          template = Template(src['template'],
-                              filename=arg,
-                              module_directory=module_directory,
-                              lookup=TemplateLookup(directories=['.']))
-          with open(item_output_name, 'w') as output_file:
-            template.render_context(Context(output_file, **args))
-
-  if not got_input and not preprocessed_output:
-    out('Got nothing to do')
-    showhelp()
 
 if __name__ == '__main__':
-  main(sys.argv[1:])
+    main(sys.argv[1:])
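
The renderer above ultimately does one thing: build a dictionary from the YAML/pickled inputs (after any plugin mutations), then hand it to Mako as the template context. A stripped-down sketch of that flow, assuming the `mako` and `pyyaml` packages are available; the inputs stand in for build.yaml and a `.template` file:

```python
import yaml
from mako.template import Template

build_yaml = "settings:\n  version: 1.9.0-dev\n"          # stand-in for build.yaml
template_text = "gRPC version: ${settings['version']}\n"  # stand-in for a template

dictionary = yaml.safe_load(build_yaml)   # the merged context dictionary
print(Template(template_text).render(**dictionary))
# gRPC version: 1.9.0-dev
```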
diff --git a/tools/buildgen/plugins/expand_bin_attrs.py b/tools/buildgen/plugins/expand_bin_attrs.py
index 6ad6e9c..d5acd8d 100755
--- a/tools/buildgen/plugins/expand_bin_attrs.py
+++ b/tools/buildgen/plugins/expand_bin_attrs.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen expand binary attributes plugin.
 
 This fills in any optional attributes.
@@ -20,7 +19,7 @@
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for expand_filegroups.
+    """The exported plugin code for expand_filegroups.
 
   The list of libs in the build.yaml file can contain "filegroups" tags.
   These refer to the filegroups in the root object. We will expand and
@@ -28,20 +27,20 @@
 
   """
 
-  targets = dictionary.get('targets')
-  default_platforms = ['windows', 'posix', 'linux', 'mac']
+    targets = dictionary.get('targets')
+    default_platforms = ['windows', 'posix', 'linux', 'mac']
 
-  for tgt in targets:
-    tgt['flaky'] = tgt.get('flaky', False)
-    tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
-    tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
-    tgt['boringssl'] = tgt.get('boringssl', False)
-    tgt['zlib'] = tgt.get('zlib', False)
-    tgt['ares'] = tgt.get('ares', False)
-    tgt['gtest'] = tgt.get('gtest', False)
+    for tgt in targets:
+        tgt['flaky'] = tgt.get('flaky', False)
+        tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
+        tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
+        tgt['boringssl'] = tgt.get('boringssl', False)
+        tgt['zlib'] = tgt.get('zlib', False)
+        tgt['ares'] = tgt.get('ares', False)
+        tgt['gtest'] = tgt.get('gtest', False)
 
-  libs = dictionary.get('libs')
-  for lib in libs:
-    lib['boringssl'] = lib.get('boringssl', False)
-    lib['zlib'] = lib.get('zlib', False)
-    lib['ares'] = lib.get('ares', False)
+    libs = dictionary.get('libs')
+    for lib in libs:
+        lib['boringssl'] = lib.get('boringssl', False)
+        lib['zlib'] = lib.get('zlib', False)
+        lib['ares'] = lib.get('ares', False)
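
The plugin above just back-fills optional target and library attributes with defaults before the templates consume them. The same pattern in isolation (the key set is illustrative):

```python
DEFAULTS = {'flaky': False, 'boringssl': False, 'zlib': False, 'ares': False}

def fill_defaults(entry, defaults=DEFAULTS):
    # dict.get keeps explicit values and supplies the default otherwise
    for key, value in defaults.items():
        entry[key] = entry.get(key, value)
    return entry

print(fill_defaults({'name': 'grpc_test', 'flaky': True}))
# {'name': 'grpc_test', 'flaky': True, 'boringssl': False, 'zlib': False, 'ares': False}
```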
diff --git a/tools/buildgen/plugins/expand_filegroups.py b/tools/buildgen/plugins/expand_filegroups.py
index 6697040..886a59c 100755
--- a/tools/buildgen/plugins/expand_filegroups.py
+++ b/tools/buildgen/plugins/expand_filegroups.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen expand filegroups plugin.
 
 This takes the list of libs from our yaml dictionary,
@@ -21,132 +20,135 @@
 
 
 def excluded(filename, exclude_res):
-  for r in exclude_res:
-    if r.search(filename):
-      return True
-  return False
+    for r in exclude_res:
+        if r.search(filename):
+            return True
+    return False
 
 
 def uniquify(lst):
-  out = []
-  for el in lst:
-    if el not in out:
-      out.append(el)
-  return out
+    out = []
+    for el in lst:
+        if el not in out:
+            out.append(el)
+    return out
 
 
 FILEGROUP_LISTS = ['src', 'headers', 'public_headers', 'deps']
 
-
 FILEGROUP_DEFAULTS = {
-  'language': 'c',
-  'boringssl': False,
-  'zlib': False,
-  'ares': False,
+    'language': 'c',
+    'boringssl': False,
+    'zlib': False,
+    'ares': False,
 }
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for expand_filegroups.
+    """The exported plugin code for expand_filegroups.
 
   The list of libs in the build.yaml file can contain "filegroups" tags.
   These refer to the filegroups in the root object. We will expand and
   merge filegroups on the src, headers and public_headers properties.
 
   """
-  libs = dictionary.get('libs')
-  targets = dictionary.get('targets')
-  filegroups_list = dictionary.get('filegroups')
-  filegroups = {}
+    libs = dictionary.get('libs')
+    targets = dictionary.get('targets')
+    filegroups_list = dictionary.get('filegroups')
+    filegroups = {}
 
-  for fg in filegroups_list:
-    for lst in FILEGROUP_LISTS:
-      fg[lst] = fg.get(lst, [])
-      fg['own_%s' % lst] = list(fg[lst])
-    for attr, val in FILEGROUP_DEFAULTS.iteritems():
-      if attr not in fg:
-        fg[attr] = val
-
-  todo = list(filegroups_list)
-  skips = 0
-
-  while todo:
-    assert skips != len(todo), "infinite loop in filegroup uses clauses: %r" % [t['name'] for t in todo]
-    # take the first element of the todo list
-    cur = todo[0]
-    todo = todo[1:]
-    # check all uses filegroups are present (if no, skip and come back later)
-    skip = False
-    for uses in cur.get('uses', []):
-      if uses not in filegroups:
-        skip = True
-    if skip:
-      skips += 1
-      todo.append(cur)
-    else:
-      skips = 0
-      assert 'plugins' not in cur
-      plugins = []
-      for uses in cur.get('uses', []):
-        for plugin in filegroups[uses]['plugins']:
-          if plugin not in plugins:
-            plugins.append(plugin)
+    for fg in filegroups_list:
         for lst in FILEGROUP_LISTS:
-          vals = cur.get(lst, [])
-          vals.extend(filegroups[uses].get(lst, []))
-          cur[lst] = vals
-      cur_plugin_name = cur.get('plugin')
-      if cur_plugin_name:
-        plugins.append(cur_plugin_name)
-      cur['plugins'] = plugins
-      filegroups[cur['name']] = cur
+            fg[lst] = fg.get(lst, [])
+            fg['own_%s' % lst] = list(fg[lst])
+        for attr, val in FILEGROUP_DEFAULTS.iteritems():
+            if attr not in fg:
+                fg[attr] = val
 
-  # build reverse dependency map
-  things = {}
-  for thing in dictionary['libs'] + dictionary['targets'] + dictionary['filegroups']:
-    things[thing['name']] = thing
-    thing['used_by'] = []
-  thing_deps = lambda t: t.get('uses', []) + t.get('filegroups', []) + t.get('deps', [])
-  for thing in things.itervalues():
-    done = set()
-    todo = thing_deps(thing)
+    todo = list(filegroups_list)
+    skips = 0
+
     while todo:
-      cur = todo[0]
-      todo = todo[1:]
-      if cur in done: continue
-      things[cur]['used_by'].append(thing['name'])
-      todo.extend(thing_deps(things[cur]))
-      done.add(cur)
+        assert skips != len(
+            todo), "infinite loop in filegroup uses clauses: %r" % [
+                t['name'] for t in todo
+            ]
+        # take the first element of the todo list
+        cur = todo[0]
+        todo = todo[1:]
+        # check all uses filegroups are present (if no, skip and come back later)
+        skip = False
+        for uses in cur.get('uses', []):
+            if uses not in filegroups:
+                skip = True
+        if skip:
+            skips += 1
+            todo.append(cur)
+        else:
+            skips = 0
+            assert 'plugins' not in cur
+            plugins = []
+            for uses in cur.get('uses', []):
+                for plugin in filegroups[uses]['plugins']:
+                    if plugin not in plugins:
+                        plugins.append(plugin)
+                for lst in FILEGROUP_LISTS:
+                    vals = cur.get(lst, [])
+                    vals.extend(filegroups[uses].get(lst, []))
+                    cur[lst] = vals
+            cur_plugin_name = cur.get('plugin')
+            if cur_plugin_name:
+                plugins.append(cur_plugin_name)
+            cur['plugins'] = plugins
+            filegroups[cur['name']] = cur
 
-  # the above expansion can introduce duplicate filenames: contract them here
-  for fg in filegroups.itervalues():
-    for lst in FILEGROUP_LISTS:
-      fg[lst] = uniquify(fg.get(lst, []))
+    # build reverse dependency map
+    things = {}
+    for thing in dictionary['libs'] + dictionary['targets'] + dictionary[
+            'filegroups']:
+        things[thing['name']] = thing
+        thing['used_by'] = []
+    thing_deps = lambda t: t.get('uses', []) + t.get('filegroups', []) + t.get('deps', [])
+    for thing in things.itervalues():
+        done = set()
+        todo = thing_deps(thing)
+        while todo:
+            cur = todo[0]
+            todo = todo[1:]
+            if cur in done: continue
+            things[cur]['used_by'].append(thing['name'])
+            todo.extend(thing_deps(things[cur]))
+            done.add(cur)
 
-  for tgt in dictionary['targets']:
-    for lst in FILEGROUP_LISTS:
-      tgt[lst] = tgt.get(lst, [])
-      tgt['own_%s' % lst] = list(tgt[lst])
+    # the above expansion can introduce duplicate filenames: contract them here
+    for fg in filegroups.itervalues():
+        for lst in FILEGROUP_LISTS:
+            fg[lst] = uniquify(fg.get(lst, []))
 
-  for lib in libs + targets:
-    assert 'plugins' not in lib
-    plugins = []
-    for lst in FILEGROUP_LISTS:
-      vals = lib.get(lst, [])
-      lib[lst] = list(vals)
-      lib['own_%s' % lst] = list(vals)
-    for fg_name in lib.get('filegroups', []):
-      fg = filegroups[fg_name]
-      for plugin in fg['plugins']:
-        if plugin not in plugins:
-          plugins.append(plugin)
-      for lst in FILEGROUP_LISTS:
-        vals = lib.get(lst, [])
-        vals.extend(fg.get(lst, []))
-        lib[lst] = vals
-      lib['plugins'] = plugins
-    if lib.get('generate_plugin_registry', False):
-      lib['src'].append('src/core/plugin_registry/%s_plugin_registry.cc' %
-                        lib['name'])
-    for lst in FILEGROUP_LISTS:
-      lib[lst] = uniquify(lib.get(lst, []))
+    for tgt in dictionary['targets']:
+        for lst in FILEGROUP_LISTS:
+            tgt[lst] = tgt.get(lst, [])
+            tgt['own_%s' % lst] = list(tgt[lst])
+
+    for lib in libs + targets:
+        assert 'plugins' not in lib
+        plugins = []
+        for lst in FILEGROUP_LISTS:
+            vals = lib.get(lst, [])
+            lib[lst] = list(vals)
+            lib['own_%s' % lst] = list(vals)
+        for fg_name in lib.get('filegroups', []):
+            fg = filegroups[fg_name]
+            for plugin in fg['plugins']:
+                if plugin not in plugins:
+                    plugins.append(plugin)
+            for lst in FILEGROUP_LISTS:
+                vals = lib.get(lst, [])
+                vals.extend(fg.get(lst, []))
+                lib[lst] = vals
+            lib['plugins'] = plugins
+        if lib.get('generate_plugin_registry', False):
+            lib['src'].append('src/core/plugin_registry/%s_plugin_registry.cc' %
+                              lib['name'])
+        for lst in FILEGROUP_LISTS:
+            lib[lst] = uniquify(lib.get(lst, []))
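
The expansion loop above resolves filegroups with a simple worklist: an entry whose `uses` are not all expanded yet is pushed back to the end, and a skip counter that reaches the length of the list means nothing can make progress, i.e. a dependency cycle. A self-contained sketch of that scheduling idea (the sample data is made up):

```python
def resolve_in_uses_order(filegroups):
    """Yield each filegroup only after everything it 'uses' has been yielded."""
    resolved = {}
    todo = list(filegroups)
    skips = 0
    while todo:
        assert skips != len(todo), 'cycle in uses: %r' % [f['name'] for f in todo]
        cur = todo.pop(0)
        if any(u not in resolved for u in cur.get('uses', [])):
            skips += 1
            todo.append(cur)          # not ready yet; revisit after the others
        else:
            skips = 0
            resolved[cur['name']] = cur
            yield cur

fgs = [{'name': 'grpc_base', 'uses': ['gpr_base']}, {'name': 'gpr_base'}]
print([fg['name'] for fg in resolve_in_uses_order(fgs)])  # ['gpr_base', 'grpc_base']
```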
diff --git a/tools/buildgen/plugins/expand_version.py b/tools/buildgen/plugins/expand_version.py
index d8a3600..8f56ce8 100755
--- a/tools/buildgen/plugins/expand_version.py
+++ b/tools/buildgen/plugins/expand_version.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen package version plugin
 
 This parses the list of targets from the yaml build file, and creates
@@ -19,84 +18,90 @@
 
 """
 
-
 import re
 
 LANGUAGES = [
-  'core',
-  'cpp',
-  'csharp',
-  'objc',
-  'php',
-  'python',
-  'ruby',
-  ]
+    'core',
+    'cpp',
+    'csharp',
+    'objc',
+    'php',
+    'python',
+    'ruby',
+]
+
 
 class Version:
 
-  def __init__(self, s):
-    self.tag = None
-    if '-' in s:
-      s, self.tag = s.split('-')
-    self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
+    def __init__(self, s):
+        self.tag = None
+        if '-' in s:
+            s, self.tag = s.split('-')
+        self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
 
-  def __str__(self):
-    """Version string in a somewhat idiomatic style for most languages"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      s += '-%s' % self.tag
-    return s
+    def __str__(self):
+        """Version string in a somewhat idiomatic style for most languages"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            s += '-%s' % self.tag
+        return s
 
-  def pep440(self):
-    """Version string in Python PEP440 style"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      # we need to translate from grpc version tags to pep440 version
-      # tags; this code is likely to be a little ad-hoc
-      if self.tag == 'dev':
-        s += '.dev0'
-      elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
-        s += 'rc%d' % int(self.tag[3:])
-      else:
-        raise Exception('Don\'t know how to translate version tag "%s" to pep440' % self.tag)
-    return s
+    def pep440(self):
+        """Version string in Python PEP440 style"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            # we need to translate from grpc version tags to pep440 version
+            # tags; this code is likely to be a little ad-hoc
+            if self.tag == 'dev':
+                s += '.dev0'
+            elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
+                s += 'rc%d' % int(self.tag[3:])
+            else:
+                raise Exception(
+                    'Don\'t know how to translate version tag "%s" to pep440' %
+                    self.tag)
+        return s
 
-  def ruby(self):
-    """Version string in Ruby style"""
-    if self.tag:
-      return '%d.%d.%d.%s' % (self.major, self.minor, self.patch, self.tag)
-    else:
-      return '%d.%d.%d' % (self.major, self.minor, self.patch)
+    def ruby(self):
+        """Version string in Ruby style"""
+        if self.tag:
+            return '%d.%d.%d.%s' % (self.major, self.minor, self.patch,
+                                    self.tag)
+        else:
+            return '%d.%d.%d' % (self.major, self.minor, self.patch)
 
-  def php(self):
-    """Version string for PHP PECL package"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      if self.tag == 'dev':
-        s += 'dev'
-      elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
-        s += 'RC%d' % int(self.tag[3:])
-      else:
-        raise Exception('Don\'t know how to translate version tag "%s" to PECL version' % self.tag)
-    return s
+    def php(self):
+        """Version string for PHP PECL package"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            if self.tag == 'dev':
+                s += 'dev'
+            elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
+                s += 'RC%d' % int(self.tag[3:])
+            else:
+                raise Exception(
+                    'Don\'t know how to translate version tag "%s" to PECL version'
+                    % self.tag)
+        return s
 
-  def php_composer(self):
-    """Version string for PHP Composer package"""
-    return '%d.%d.%d' % (self.major, self.minor, self.patch)
+    def php_composer(self):
+        """Version string for PHP Composer package"""
+        return '%d.%d.%d' % (self.major, self.minor, self.patch)
+
 
 def mako_plugin(dictionary):
-  """Expand version numbers:
+    """Expand version numbers:
      - for each language, ensure there's a language_version tag in
        settings (defaulting to the master version tag)
      - expand version strings to major, minor, patch, and tag
   """
 
-  settings = dictionary['settings']
-  master_version = Version(settings['version'])
-  settings['version'] = master_version
-  for language in LANGUAGES:
-    version_tag = '%s_version' % language
-    if version_tag in settings:
-      settings[version_tag] = Version(settings[version_tag])
-    else:
-      settings[version_tag] = master_version
+    settings = dictionary['settings']
+    master_version = Version(settings['version'])
+    settings['version'] = master_version
+    for language in LANGUAGES:
+        version_tag = '%s_version' % language
+        if version_tag in settings:
+            settings[version_tag] = Version(settings[version_tag])
+        else:
+            settings[version_tag] = master_version
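
For reference, the tag handling above maps gRPC's `-dev`/`-preN` suffixes onto each package ecosystem's versioning conventions. A condensed sketch of just the PEP 440 branch (mirroring the plugin's logic, not importing it):

```python
def pep440(version):
    # '1.9.0-dev' -> '1.9.0.dev0', '1.9.0-pre1' -> '1.9.0rc1', '1.9.0' -> '1.9.0'
    base, _, tag = version.partition('-')
    if not tag:
        return base
    if tag == 'dev':
        return base + '.dev0'
    if tag.startswith('pre'):
        return base + 'rc%d' % int(tag[3:])
    raise ValueError('cannot translate version tag %r to pep440' % tag)

assert pep440('1.9.0-dev') == '1.9.0.dev0'
assert pep440('1.9.0-pre1') == '1.9.0rc1'
```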
diff --git a/tools/buildgen/plugins/generate_vsprojects.py b/tools/buildgen/plugins/generate_vsprojects.py
index 06755f6..f6251d4 100755
--- a/tools/buildgen/plugins/generate_vsprojects.py
+++ b/tools/buildgen/plugins/generate_vsprojects.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen vsprojects plugin.
 
 This parses the list of libraries, and generates globals "vsprojects"
@@ -19,62 +18,67 @@
 
 """
 
-
 import hashlib
 import re
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for generate_vsprojeccts
+    """The exported plugin code for generate_vsprojeccts
 
   We want to help the work of the visual studio generators.
 
   """
 
-  libs = dictionary.get('libs', [])
-  targets = dictionary.get('targets', [])
+    libs = dictionary.get('libs', [])
+    targets = dictionary.get('targets', [])
 
-  for lib in libs:
-    lib['is_library'] = True
-  for target in targets:
-    target['is_library'] = False
+    for lib in libs:
+        lib['is_library'] = True
+    for target in targets:
+        target['is_library'] = False
 
-  projects = []
-  projects.extend(libs)
-  projects.extend(targets)
-  for target in projects:
-    if 'build' in target and target['build'] == 'test':
-      default_test_dir = 'test'
-    else:
-      default_test_dir = '.'
-    if 'vs_config_type' not in target:
-      if 'build' in target and target['build'] == 'test':
-        target['vs_config_type'] = 'Application'
-      else:
-        target['vs_config_type'] = 'StaticLibrary'
-    if 'vs_packages' not in target:
-      target['vs_packages'] = []
-    if 'vs_props' not in target:
-      target['vs_props'] = []
-    target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
-    if target.get('vs_project_guid', None) is None and 'windows' in target.get('platforms', ['windows']):
-      name = target['name']
-      guid = re.sub('(........)(....)(....)(....)(.*)',
-             r'{\1-\2-\3-\4-\5}',
-             hashlib.md5(name).hexdigest())
-      target['vs_project_guid'] = guid.upper()
-  # Exclude projects without a visual project guid, such as the tests.
-  projects = [project for project in projects
-                if project.get('vs_project_guid', None)]
+    projects = []
+    projects.extend(libs)
+    projects.extend(targets)
+    for target in projects:
+        if 'build' in target and target['build'] == 'test':
+            default_test_dir = 'test'
+        else:
+            default_test_dir = '.'
+        if 'vs_config_type' not in target:
+            if 'build' in target and target['build'] == 'test':
+                target['vs_config_type'] = 'Application'
+            else:
+                target['vs_config_type'] = 'StaticLibrary'
+        if 'vs_packages' not in target:
+            target['vs_packages'] = []
+        if 'vs_props' not in target:
+            target['vs_props'] = []
+        target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
+        if target.get('vs_project_guid',
+                      None) is None and 'windows' in target.get('platforms',
+                                                                ['windows']):
+            name = target['name']
+            guid = re.sub('(........)(....)(....)(....)(.*)',
+                          r'{\1-\2-\3-\4-\5}', hashlib.md5(name).hexdigest())
+            target['vs_project_guid'] = guid.upper()
+    # Exclude projects without a visual project guid, such as the tests.
+    projects = [
+        project for project in projects if project.get('vs_project_guid', None)
+    ]
 
-  projects = [project for project in projects
-                if project['language'] != 'c++' or project['build'] == 'all' or project['build'] == 'protoc' or (project['language'] == 'c++' and  (project['build'] == 'test' or project['build'] == 'private'))]
+    projects = [
+        project for project in projects
+        if project['language'] != 'c++' or project['build'] == 'all' or project[
+            'build'] == 'protoc' or (project['language'] == 'c++' and (project[
+                'build'] == 'test' or project['build'] == 'private'))
+    ]
 
-  project_dict = dict([(p['name'], p) for p in projects])
+    project_dict = dict([(p['name'], p) for p in projects])
 
-  packages = dictionary.get('vspackages', [])
-  packages_dict = dict([(p['name'], p) for p in packages])
+    packages = dictionary.get('vspackages', [])
+    packages_dict = dict([(p['name'], p) for p in packages])
 
-  dictionary['vsprojects'] = projects
-  dictionary['vsproject_dict'] = project_dict
-  dictionary['vspackages_dict'] = packages_dict
+    dictionary['vsprojects'] = projects
+    dictionary['vsproject_dict'] = project_dict
+    dictionary['vspackages_dict'] = packages_dict
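
The project GUIDs above are derived deterministically from the target name, so regenerating the Visual Studio projects never churns them. The same derivation in isolation (`.encode()` added so the sketch also runs on Python 3; the target name is arbitrary):

```python
import hashlib
import re

def vs_project_guid(name):
    digest = hashlib.md5(name.encode()).hexdigest()   # 32 hex chars
    guid = re.sub('(........)(....)(....)(....)(.*)',
                  r'{\1-\2-\3-\4-\5}', digest)        # 8-4-4-4-12 layout in braces
    return guid.upper()

print(vs_project_guid('grpc'))  # stable across runs for the same name
```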
diff --git a/tools/buildgen/plugins/list_api.py b/tools/buildgen/plugins/list_api.py
index bed98da..f7ecb97 100755
--- a/tools/buildgen/plugins/list_api.py
+++ b/tools/buildgen/plugins/list_api.py
@@ -21,44 +21,47 @@
 import sys
 import yaml
 
-
 _RE_API = r'(?:GPRAPI|GRPCAPI|CENSUSAPI)([^;]*);'
 
 
 def list_c_apis(filenames):
-  for filename in filenames:
-    with open(filename, 'r') as f:
-      text = f.read()
-    for m in re.finditer(_RE_API, text):
-      api_declaration = re.sub('[ \r\n\t]+', ' ', m.group(1))
-      type_and_name, args_and_close = api_declaration.split('(', 1)
-      args = args_and_close[:args_and_close.rfind(')')].strip()
-      last_space = type_and_name.rfind(' ')
-      last_star = type_and_name.rfind('*')
-      type_end = max(last_space, last_star)
-      return_type = type_and_name[0:type_end+1].strip()
-      name = type_and_name[type_end+1:].strip()
-      yield {'return_type': return_type, 'name': name, 'arguments': args, 'header': filename}
+    for filename in filenames:
+        with open(filename, 'r') as f:
+            text = f.read()
+        for m in re.finditer(_RE_API, text):
+            api_declaration = re.sub('[ \r\n\t]+', ' ', m.group(1))
+            type_and_name, args_and_close = api_declaration.split('(', 1)
+            args = args_and_close[:args_and_close.rfind(')')].strip()
+            last_space = type_and_name.rfind(' ')
+            last_star = type_and_name.rfind('*')
+            type_end = max(last_space, last_star)
+            return_type = type_and_name[0:type_end + 1].strip()
+            name = type_and_name[type_end + 1:].strip()
+            yield {
+                'return_type': return_type,
+                'name': name,
+                'arguments': args,
+                'header': filename
+            }
 
 
 def headers_under(directory):
-  for root, dirnames, filenames in os.walk(directory):
-    for filename in fnmatch.filter(filenames, '*.h'):
-      yield os.path.join(root, filename)
+    for root, dirnames, filenames in os.walk(directory):
+        for filename in fnmatch.filter(filenames, '*.h'):
+            yield os.path.join(root, filename)
 
 
 def mako_plugin(dictionary):
-  apis = []
-  headers = []
+    apis = []
+    headers = []
 
-  for lib in dictionary['libs']:
-    if lib['name'] in ['grpc', 'gpr']:
-      headers.extend(lib['public_headers'])
+    for lib in dictionary['libs']:
+        if lib['name'] in ['grpc', 'gpr']:
+            headers.extend(lib['public_headers'])
 
-  apis.extend(list_c_apis(sorted(set(headers))))
-  dictionary['c_apis'] = apis
+    apis.extend(list_c_apis(sorted(set(headers))))
+    dictionary['c_apis'] = apis
 
 
 if __name__ == '__main__':
-  print yaml.dump([api for api in list_c_apis(headers_under('include/grpc'))])
-
+    print yaml.dump([api for api in list_c_apis(headers_under('include/grpc'))])
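
The API lister above pulls `GRPCAPI`/`GPRAPI`/`CENSUSAPI` declarations out of headers with a regex, then splits return type, name, and argument list by hand. A small sketch of that parsing against an inline snippet (the declaration text is invented but follows the real macro style):

```python
import re

_RE_API = r'(?:GPRAPI|GRPCAPI|CENSUSAPI)([^;]*);'
header = "GRPCAPI grpc_channel *grpc_example_channel_create(const char *target);"

for m in re.finditer(_RE_API, header):
    decl = re.sub(r'[ \r\n\t]+', ' ', m.group(1)).strip()
    type_and_name, rest = decl.split('(', 1)
    args = rest[:rest.rfind(')')].strip()
    type_end = max(type_and_name.rfind(' '), type_and_name.rfind('*'))
    print({'return_type': type_and_name[:type_end + 1].strip(),
           'name': type_and_name[type_end + 1:].strip(),
           'arguments': args})
# {'return_type': 'grpc_channel *', 'name': 'grpc_example_channel_create',
#  'arguments': 'const char *target'}
```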
diff --git a/tools/buildgen/plugins/list_protos.py b/tools/buildgen/plugins/list_protos.py
index 07a860c..0aa5fe5 100755
--- a/tools/buildgen/plugins/list_protos.py
+++ b/tools/buildgen/plugins/list_protos.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen .proto files list plugin.
 
 This parses the list of targets from the yaml build file, and creates
@@ -19,12 +18,11 @@
 
 """
 
-
 import re
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for list_protos.
+    """The exported plugin code for list_protos.
 
   Some projects generators may want to get the full list of unique .proto files
   that are being included in a project. This code extracts all files referenced
@@ -33,23 +31,23 @@
 
   """
 
-  libs = dictionary.get('libs', [])
-  targets = dictionary.get('targets', [])
+    libs = dictionary.get('libs', [])
+    targets = dictionary.get('targets', [])
 
-  proto_re = re.compile('(.*)\\.proto')
+    proto_re = re.compile('(.*)\\.proto')
 
-  protos = set()
-  for lib in libs:
-    for src in lib.get('src', []):
-      m = proto_re.match(src)
-      if m:
-        protos.add(m.group(1))
-  for tgt in targets:
-    for src in tgt.get('src', []):
-      m = proto_re.match(src)
-      if m:
-        protos.add(m.group(1))
+    protos = set()
+    for lib in libs:
+        for src in lib.get('src', []):
+            m = proto_re.match(src)
+            if m:
+                protos.add(m.group(1))
+    for tgt in targets:
+        for src in tgt.get('src', []):
+            m = proto_re.match(src)
+            if m:
+                protos.add(m.group(1))
 
-  protos = sorted(protos)
+    protos = sorted(protos)
 
-  dictionary['protos'] = protos
+    dictionary['protos'] = protos
diff --git a/tools/buildgen/plugins/make_fuzzer_tests.py b/tools/buildgen/plugins/make_fuzzer_tests.py
index 56dad2d..f644a7c 100644
--- a/tools/buildgen/plugins/make_fuzzer_tests.py
+++ b/tools/buildgen/plugins/make_fuzzer_tests.py
@@ -11,35 +11,37 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Create tests for each fuzzer"""
 
 import copy
 import glob
 
+
 def mako_plugin(dictionary):
-  targets = dictionary['targets']
-  tests = dictionary['tests']
-  for tgt in targets:
-    if tgt['build'] == 'fuzzer':
-      new_target = copy.deepcopy(tgt)
-      new_target['build'] = 'test'
-      new_target['name'] += '_one_entry'
-      new_target['run'] = False
-      new_target['src'].append('test/core/util/one_corpus_entry_fuzzer.cc')
-      new_target['own_src'].append('test/core/util/one_corpus_entry_fuzzer.cc')
-      targets.append(new_target)
-      for corpus in new_target['corpus_dirs']:
-        for fn in sorted(glob.glob('%s/*' % corpus)):
-          tests.append({
-              'name': new_target['name'],
-              'args': [fn],
-              'exclude_iomgrs': ['uv'],
-              'exclude_configs': ['tsan'],
-              'uses_polling': False,
-              'platforms': ['mac', 'linux'],
-              'ci_platforms': ['linux'],
-              'flaky': False,
-              'language': 'c',
-              'cpu_cost': 0.1,
-          })
+    targets = dictionary['targets']
+    tests = dictionary['tests']
+    for tgt in targets:
+        if tgt['build'] == 'fuzzer':
+            new_target = copy.deepcopy(tgt)
+            new_target['build'] = 'test'
+            new_target['name'] += '_one_entry'
+            new_target['run'] = False
+            new_target['src'].append(
+                'test/core/util/one_corpus_entry_fuzzer.cc')
+            new_target['own_src'].append(
+                'test/core/util/one_corpus_entry_fuzzer.cc')
+            targets.append(new_target)
+            for corpus in new_target['corpus_dirs']:
+                for fn in sorted(glob.glob('%s/*' % corpus)):
+                    tests.append({
+                        'name': new_target['name'],
+                        'args': [fn],
+                        'exclude_iomgrs': ['uv'],
+                        'exclude_configs': ['tsan'],
+                        'uses_polling': False,
+                        'platforms': ['mac', 'linux'],
+                        'ci_platforms': ['linux'],
+                        'flaky': False,
+                        'language': 'c',
+                        'cpu_cost': 0.1,
+                    })
diff --git a/tools/buildgen/plugins/transitive_dependencies.py b/tools/buildgen/plugins/transitive_dependencies.py
index bf5263e..5373bca 100644
--- a/tools/buildgen/plugins/transitive_dependencies.py
+++ b/tools/buildgen/plugins/transitive_dependencies.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen transitive dependencies
 
 This takes the list of libs, node_modules, and targets from our
@@ -20,35 +19,40 @@
 
 """
 
+
 def get_lib(libs, name):
-  try:
-    return next(lib for lib in libs if lib['name']==name)
-  except StopIteration:
-    return None
+    try:
+        return next(lib for lib in libs if lib['name'] == name)
+    except StopIteration:
+        return None
+
 
 def transitive_deps(lib, libs):
-  if lib is not None and 'deps' in lib:
-    # Recursively call transitive_deps on each dependency, and take the union
-    return set.union(set(lib['deps']),
-                     *[set(transitive_deps(get_lib(libs, dep), libs))
-                       for dep in lib['deps']])
-  else:
-    return set()
+    if lib is not None and 'deps' in lib:
+        # Recursively call transitive_deps on each dependency, and take the union
+        return set.union(
+            set(lib['deps']), *[
+                set(transitive_deps(get_lib(libs, dep), libs))
+                for dep in lib['deps']
+            ])
+    else:
+        return set()
+
 
 def mako_plugin(dictionary):
-  """The exported plugin code for transitive_dependencies.
+    """The exported plugin code for transitive_dependencies.
 
   Iterate over each list and check each item for a deps list. We add a
   transitive_deps property to each with the transitive closure of those
   dependency lists.
   """
-  libs = dictionary.get('libs')
+    libs = dictionary.get('libs')
 
-  for target_name, target_list in dictionary.items():
-    for target in target_list:
-      if isinstance(target, dict) and 'deps' in target:
-        target['transitive_deps'] = transitive_deps(target, libs)
+    for target_name, target_list in dictionary.items():
+        for target in target_list:
+            if isinstance(target, dict) and 'deps' in target:
+                target['transitive_deps'] = transitive_deps(target, libs)
 
-  python_dependencies = dictionary.get('python_dependencies')
-  python_dependencies['transitive_deps'] = (
-      transitive_deps(python_dependencies, libs))
+    python_dependencies = dictionary.get('python_dependencies')
+    python_dependencies['transitive_deps'] = (
+        transitive_deps(python_dependencies, libs))
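
The closure above is computed by recursing into each dependency's own `deps` and unioning the results; it assumes the dependency graph is acyclic, as build.yaml's is. A minimal sketch with a made-up lib list:

```python
def get_lib(libs, name):
    return next((lib for lib in libs if lib['name'] == name), None)

def transitive_deps(lib, libs):
    if lib is None or 'deps' not in lib:
        return set()
    return set.union(set(lib['deps']),
                     *[transitive_deps(get_lib(libs, d), libs) for d in lib['deps']])

libs = [{'name': 'gpr'},
        {'name': 'grpc', 'deps': ['gpr']},
        {'name': 'grpc++', 'deps': ['grpc']}]
print(transitive_deps(get_lib(libs, 'grpc++'), libs))  # {'grpc', 'gpr'} (order may vary)
```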
diff --git a/tools/codegen/core/gen_header_frame.py b/tools/codegen/core/gen_header_frame.py
index 5375c14..7219d4d 100755
--- a/tools/codegen/core/gen_header_frame.py
+++ b/tools/codegen/core/gen_header_frame.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Read from stdin a set of colon separated http headers:
    :path: /foo/bar
    content-type: application/grpc
@@ -24,109 +23,118 @@
 import sys
 import argparse
 
+
 def append_never_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x10)
-  assert(len(key) <= 126)
-  payload_line.append(len(key))
-  payload_line.extend(ord(c) for c in key)
-  assert(len(value) <= 126)
-  payload_line.append(len(value))
-  payload_line.extend(ord(c) for c in value)
+    payload_line.append(0x10)
+    assert (len(key) <= 126)
+    payload_line.append(len(key))
+    payload_line.extend(ord(c) for c in key)
+    assert (len(value) <= 126)
+    payload_line.append(len(value))
+    payload_line.extend(ord(c) for c in value)
+
 
 def append_inc_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x40)
-  assert(len(key) <= 126)
-  payload_line.append(len(key))
-  payload_line.extend(ord(c) for c in key)
-  assert(len(value) <= 126)
-  payload_line.append(len(value))
-  payload_line.extend(ord(c) for c in value)
+    payload_line.append(0x40)
+    assert (len(key) <= 126)
+    payload_line.append(len(key))
+    payload_line.extend(ord(c) for c in key)
+    assert (len(value) <= 126)
+    payload_line.append(len(value))
+    payload_line.extend(ord(c) for c in value)
+
 
 def append_pre_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x80 + 61 + count - n)
+    payload_line.append(0x80 + 61 + count - n)
+
 
 _COMPRESSORS = {
-  'never': append_never_indexed,
-  'inc': append_inc_indexed,
-  'pre': append_pre_indexed,
+    'never': append_never_indexed,
+    'inc': append_inc_indexed,
+    'pre': append_pre_indexed,
 }
 
 argp = argparse.ArgumentParser('Generate header frames')
-argp.add_argument('--set_end_stream', default=False, action='store_const', const=True)
-argp.add_argument('--no_framing', default=False, action='store_const', const=True)
-argp.add_argument('--compression', choices=sorted(_COMPRESSORS.keys()), default='never')
+argp.add_argument(
+    '--set_end_stream', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--no_framing', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--compression', choices=sorted(_COMPRESSORS.keys()), default='never')
 argp.add_argument('--hex', default=False, action='store_const', const=True)
 args = argp.parse_args()
 
 # parse input, fill in vals
 vals = []
 for line in sys.stdin:
-  line = line.strip()
-  if line == '': continue
-  if line[0] == '#': continue
-  key_tail, value = line[1:].split(':')
-  key = (line[0] + key_tail).strip()
-  value = value.strip()
-  vals.append((key, value))
+    line = line.strip()
+    if line == '': continue
+    if line[0] == '#': continue
+    key_tail, value = line[1:].split(':')
+    key = (line[0] + key_tail).strip()
+    value = value.strip()
+    vals.append((key, value))
 
 # generate frame payload binary data
 payload_bytes = []
 if not args.no_framing:
-  payload_bytes.append([]) # reserve space for header
+    payload_bytes.append([])  # reserve space for header
 payload_len = 0
 n = 0
 for key, value in vals:
-  payload_line = []
-  _COMPRESSORS[args.compression](payload_line, n, len(vals), key, value)
-  n += 1
-  payload_len += len(payload_line)
-  payload_bytes.append(payload_line)
+    payload_line = []
+    _COMPRESSORS[args.compression](payload_line, n, len(vals), key, value)
+    n += 1
+    payload_len += len(payload_line)
+    payload_bytes.append(payload_line)
 
 # fill in header
 if not args.no_framing:
-  flags = 0x04  # END_HEADERS
-  if args.set_end_stream:
-    flags |= 0x01  # END_STREAM
-  payload_bytes[0].extend([
-      (payload_len >> 16) & 0xff,
-      (payload_len >> 8) & 0xff,
-      (payload_len) & 0xff,
-      # header frame
-      0x01,
-      # flags
-      flags,
-      # stream id
-      0x00,
-      0x00,
-      0x00,
-      0x01
-  ])
+    flags = 0x04  # END_HEADERS
+    if args.set_end_stream:
+        flags |= 0x01  # END_STREAM
+    payload_bytes[0].extend([
+        (payload_len >> 16) & 0xff,
+        (payload_len >> 8) & 0xff,
+        (payload_len) & 0xff,
+        # header frame
+        0x01,
+        # flags
+        flags,
+        # stream id
+        0x00,
+        0x00,
+        0x00,
+        0x01
+    ])
 
 hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
 
+
 def esc_c(line):
-  out = "\""
-  last_was_hex = False
-  for c in line:
-    if 32 <= c < 127:
-      if c in hex_bytes and last_was_hex:
-        out += "\"\""
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-      last_was_hex = False
-    else:
-      out += "\\x%02x" % c
-      last_was_hex = True
-  return out + "\""
+    out = "\""
+    last_was_hex = False
+    for c in line:
+        if 32 <= c < 127:
+            if c in hex_bytes and last_was_hex:
+                out += "\"\""
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+            last_was_hex = False
+        else:
+            out += "\\x%02x" % c
+            last_was_hex = True
+    return out + "\""
+
 
 # dump bytes
 if args.hex:
-  all_bytes = []
-  for line in payload_bytes:
-    all_bytes.extend(line)
-  print '{%s}' % ', '.join('0x%02x' % c for c in all_bytes)
+    all_bytes = []
+    for line in payload_bytes:
+        all_bytes.extend(line)
+    print '{%s}' % ', '.join('0x%02x' % c for c in all_bytes)
 else:
-  for line in payload_bytes:
-    print esc_c(line)
+    for line in payload_bytes:
+        print esc_c(line)
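
The framing block above emits the standard 9-octet HTTP/2 frame header ahead of the HPACK payload: a 24-bit big-endian length, frame type 0x01 (HEADERS), a flags byte carrying END_HEADERS and optionally END_STREAM, and stream id 1. A tiny sketch of just that header layout:

```python
def http2_headers_frame_header(payload_len, end_stream=False):
    flags = 0x04                       # END_HEADERS
    if end_stream:
        flags |= 0x01                  # END_STREAM
    return [
        (payload_len >> 16) & 0xff,    # 24-bit payload length, big-endian
        (payload_len >> 8) & 0xff,
        payload_len & 0xff,
        0x01,                          # frame type: HEADERS
        flags,
        0x00, 0x00, 0x00, 0x01,        # 31-bit stream id = 1
    ]

print(['0x%02x' % b for b in http2_headers_frame_header(5, end_stream=True)])
```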
diff --git a/tools/codegen/core/gen_hpack_tables.c b/tools/codegen/core/gen_hpack_tables.cc
similarity index 99%
rename from tools/codegen/core/gen_hpack_tables.c
rename to tools/codegen/core/gen_hpack_tables.cc
index 73dfa9f..0e7a7b8 100644
--- a/tools/codegen/core/gen_hpack_tables.c
+++ b/tools/codegen/core/gen_hpack_tables.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant tables for hpack.c */
+/* generates constant tables for hpack.cc */
 
 #include <assert.h>
 #include <stddef.h>
diff --git a/tools/codegen/core/gen_legal_metadata_characters.c b/tools/codegen/core/gen_legal_metadata_characters.cc
similarity index 96%
rename from tools/codegen/core/gen_legal_metadata_characters.c
rename to tools/codegen/core/gen_legal_metadata_characters.cc
index 5e292ab..fbabd24 100644
--- a/tools/codegen/core/gen_legal_metadata_characters.c
+++ b/tools/codegen/core/gen_legal_metadata_characters.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant table for metadata.c */
+/* generates constant table for metadata.cc */
 
 #include <stdio.h>
 #include <string.h>
diff --git a/tools/codegen/core/gen_percent_encoding_tables.c b/tools/codegen/core/gen_percent_encoding_tables.cc
similarity index 97%
rename from tools/codegen/core/gen_percent_encoding_tables.c
rename to tools/codegen/core/gen_percent_encoding_tables.cc
index 49ea5ea..a99024e 100644
--- a/tools/codegen/core/gen_percent_encoding_tables.c
+++ b/tools/codegen/core/gen_percent_encoding_tables.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant table for metadata.c */
+/* generates constant table for metadata.cc */
 
 #include <stdio.h>
 #include <string.h>
diff --git a/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py b/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
index 8b5b618..fa87c97 100755
--- a/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
+++ b/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
@@ -14,48 +14,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 def esc_c(line):
-  out = "\""
-  last_was_hex = False
-  for c in line:
-    if 32 <= c < 127:
-      if c in hex_bytes and last_was_hex:
-        out += "\"\""
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-      last_was_hex = False
-    else:
-      out += "\\x%02x" % c
-      last_was_hex = True
-  return out + "\""
+    out = "\""
+    last_was_hex = False
+    for c in line:
+        if 32 <= c < 127:
+            if c in hex_bytes and last_was_hex:
+                out += "\"\""
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+            last_was_hex = False
+        else:
+            out += "\\x%02x" % c
+            last_was_hex = True
+    return out + "\""
+
 
 done = set()
 
 for message_length in range(0, 3):
-  for send_message_length in range(0, message_length + 1):
-    payload = [
-      0,
-      (message_length >> 24) & 0xff,
-      (message_length >> 16) & 0xff,
-      (message_length >> 8) & 0xff,
-      (message_length) & 0xff
-    ] + send_message_length * [0]
-    for frame_length in range(0, len(payload) + 1):
-      is_end = frame_length == len(payload) and send_message_length == message_length
-      frame = [
-        (frame_length >> 16) & 0xff,
-        (frame_length >> 8) & 0xff,
-        (frame_length) & 0xff,
-        0,
-        1 if is_end else 0,
-        0, 0, 0, 1
-      ] + payload[0:frame_length]
-      text = esc_c(frame)
-      if text not in done:
-        print 'GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);' % (
-            'succeeds' if is_end else 'fails', 
-            text, 
-            '0' if is_end else 'GRPC_BAD_CLIENT_DISCONNECT')
-        done.add(text)
+    for send_message_length in range(0, message_length + 1):
+        payload = [
+            0, (message_length >> 24) & 0xff, (message_length >> 16) & 0xff,
+            (message_length >> 8) & 0xff, (message_length) & 0xff
+        ] + send_message_length * [0]
+        for frame_length in range(0, len(payload) + 1):
+            is_end = frame_length == len(
+                payload) and send_message_length == message_length
+            frame = [(frame_length >> 16) & 0xff, (frame_length >> 8) & 0xff,
+                     (frame_length) & 0xff, 0, 1
+                     if is_end else 0, 0, 0, 0, 1] + payload[0:frame_length]
+            text = esc_c(frame)
+            if text not in done:
+                print 'GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);' % (
+                    'succeeds' if is_end else 'fails', text, '0'
+                    if is_end else 'GRPC_BAD_CLIENT_DISCONNECT')
+                done.add(text)
diff --git a/tools/codegen/core/gen_settings_ids.py b/tools/codegen/core/gen_settings_ids.py
index 481c421..bc43806 100755
--- a/tools/codegen/core/gen_settings_ids.py
+++ b/tools/codegen/core/gen_settings_ids.py
@@ -24,92 +24,114 @@
 OnError = collections.namedtuple('OnError', 'behavior code')
 clamp_invalid_value = OnError('CLAMP_INVALID_VALUE', 'PROTOCOL_ERROR')
 disconnect_on_invalid_value = lambda e: OnError('DISCONNECT_ON_INVALID_VALUE', e)
-DecoratedSetting = collections.namedtuple('DecoratedSetting', 'enum name setting')
+DecoratedSetting = collections.namedtuple('DecoratedSetting',
+                                          'enum name setting')
 
 _SETTINGS = {
-  'HEADER_TABLE_SIZE': Setting(1, 4096, 0, 0xffffffff, clamp_invalid_value),
-  'ENABLE_PUSH': Setting(2, 1, 0, 1, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'MAX_CONCURRENT_STREAMS': Setting(3, 0xffffffff, 0, 0xffffffff, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'INITIAL_WINDOW_SIZE': Setting(4, 65535, 0, 0x7fffffff, disconnect_on_invalid_value('FLOW_CONTROL_ERROR')),
-  'MAX_FRAME_SIZE': Setting(5, 16384, 16384, 16777215, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'MAX_HEADER_LIST_SIZE': Setting(6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE, clamp_invalid_value),
-  'GRPC_ALLOW_TRUE_BINARY_METADATA': Setting(0xfe03, 0, 0, 1, clamp_invalid_value),
+    'HEADER_TABLE_SIZE':
+    Setting(1, 4096, 0, 0xffffffff, clamp_invalid_value),
+    'ENABLE_PUSH':
+    Setting(2, 1, 0, 1, disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'MAX_CONCURRENT_STREAMS':
+    Setting(3, 0xffffffff, 0, 0xffffffff,
+            disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'INITIAL_WINDOW_SIZE':
+    Setting(4, 65535, 0, 0x7fffffff,
+            disconnect_on_invalid_value('FLOW_CONTROL_ERROR')),
+    'MAX_FRAME_SIZE':
+    Setting(5, 16384, 16384, 16777215,
+            disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'MAX_HEADER_LIST_SIZE':
+    Setting(6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE,
+            clamp_invalid_value),
+    'GRPC_ALLOW_TRUE_BINARY_METADATA':
+    Setting(0xfe03, 0, 0, 1, clamp_invalid_value),
 }
 
 H = open('src/core/ext/transport/chttp2/transport/http2_settings.h', 'w')
 C = open('src/core/ext/transport/chttp2/transport/http2_settings.c', 'w')
 
+
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >>f, '/*'
-    for line in banner:
-      print >>f, ' * %s' % line
-    print >>f, ' */'
-    print >>f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
+
 
 # copy-paste copyright notice from this file
 with open(sys.argv[0]) as my_source:
-  copyright = []
-  for line in my_source:
-    if line[0] != '#': break
-  for line in my_source:
-    if line[0] == '#':
-      copyright.append(line)
-      break
-  for line in my_source:
-    if line[0] != '#':
-      break
-    copyright.append(line)
-  put_banner([H,C], [line[2:].rstrip() for line in copyright])
+    copyright = []
+    for line in my_source:
+        if line[0] != '#': break
+    for line in my_source:
+        if line[0] == '#':
+            copyright.append(line)
+            break
+    for line in my_source:
+        if line[0] != '#':
+            break
+        copyright.append(line)
+    put_banner([H, C], [line[2:].rstrip() for line in copyright])
 
-put_banner([H,C], ["Automatically generated by tools/codegen/core/gen_settings_ids.py"])
+put_banner(
+    [H, C],
+    ["Automatically generated by tools/codegen/core/gen_settings_ids.py"])
 
-print >>H, "#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
-print >>H, "#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
-print >>H
-print >>H, "#include <stdint.h>"
-print >>H, "#include <stdbool.h>"
-print >>H
+print >> H, "#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
+print >> H, "#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
+print >> H
+print >> H, "#include <stdint.h>"
+print >> H, "#include <stdbool.h>"
+print >> H
 
-print >>C, "#include \"src/core/ext/transport/chttp2/transport/http2_settings.h\""
-print >>C
-print >>C, "#include <grpc/support/useful.h>"
-print >>C, "#include \"src/core/lib/transport/http2_errors.h\""
-print >>C
+print >> C, "#include \"src/core/ext/transport/chttp2/transport/http2_settings.h\""
+print >> C
+print >> C, "#include <grpc/support/useful.h>"
+print >> C, "#include \"src/core/lib/transport/http2_errors.h\""
+print >> C
 
 p = perfection.hash_parameters(sorted(x.id for x in _SETTINGS.values()))
 print p
 
+
 def hash(i):
-  i += p.offset
-  x = i % p.t
-  y = i / p.t
-  return x + p.r[y]
+    i += p.offset
+    x = i % p.t
+    y = i / p.t
+    return x + p.r[y]
 
-decorated_settings = [DecoratedSetting(hash(setting.id), name, setting)
-                      for name, setting in _SETTINGS.iteritems()]
 
-print >>H, 'typedef enum {'
+decorated_settings = [
+    DecoratedSetting(hash(setting.id), name, setting)
+    for name, setting in _SETTINGS.iteritems()
+]
+
+print >> H, 'typedef enum {'
 for decorated_setting in sorted(decorated_settings):
-  print >>H, '  GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */' % (
-      decorated_setting.name, decorated_setting.enum, decorated_setting.setting.id)
-print >>H, '} grpc_chttp2_setting_id;'
-print >>H
-print >>H, '#define GRPC_CHTTP2_NUM_SETTINGS %d' % (max(x.enum for x in decorated_settings) + 1)
+    print >> H, '  GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */' % (
+        decorated_setting.name, decorated_setting.enum,
+        decorated_setting.setting.id)
+print >> H, '} grpc_chttp2_setting_id;'
+print >> H
+print >> H, '#define GRPC_CHTTP2_NUM_SETTINGS %d' % (
+    max(x.enum for x in decorated_settings) + 1)
 
-print >>H, 'extern const uint16_t grpc_setting_id_to_wire_id[];'
-print >>C, 'const uint16_t grpc_setting_id_to_wire_id[] = {%s};' % ','.join(
+print >> H, 'extern const uint16_t grpc_setting_id_to_wire_id[];'
+print >> C, 'const uint16_t grpc_setting_id_to_wire_id[] = {%s};' % ','.join(
     '%d' % s for s in p.slots)
-print >>H
-print >>H, "bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);"
+print >> H
+print >> H, "bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);"
 cgargs = {
-      'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
-      't': p.t,
-      'offset': abs(p.offset),
-      'offset_sign': '+' if p.offset > 0 else '-'
-  }
-print >>C, """
+    'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
+    't': p.t,
+    'offset': abs(p.offset),
+    'offset_sign': '+' if p.offset > 0 else '-'
+}
+print >> C, """
 bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) {
   uint32_t i = wire_id %(offset_sign)s %(offset)d;
   uint32_t x = i %% %(t)d;
@@ -118,17 +140,17 @@
   switch (y) {
 """ % cgargs
 for i, r in enumerate(p.r):
-  if not r: continue
-  if r < 0: print >>C, 'case %d: h -= %d; break;' % (i, -r)
-  else: print >>C, 'case %d: h += %d; break;' % (i, r)
-print >>C, """
+    if not r: continue
+    if r < 0: print >> C, 'case %d: h -= %d; break;' % (i, -r)
+    else: print >> C, 'case %d: h += %d; break;' % (i, r)
+print >> C, """
   }
   *out = (grpc_chttp2_setting_id)h;
   return h < GPR_ARRAY_SIZE(grpc_setting_id_to_wire_id) && grpc_setting_id_to_wire_id[h] == wire_id;
 }
 """ % cgargs
 
-print >>H, """
+print >> H, """
 typedef enum {
   GRPC_CHTTP2_CLAMP_INVALID_VALUE,
   GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE
@@ -145,25 +167,22 @@
 
 extern const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
 """
-print >>C, "const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {"
+print >> C, "const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {"
 i = 0
 for decorated_setting in sorted(decorated_settings):
-  while i < decorated_setting.enum:
-    print >>C, "{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},"
+    while i < decorated_setting.enum:
+        print >> C, "{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},"
+        i += 1
+    print >> C, "{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
+        decorated_setting.name, decorated_setting.setting.default,
+        decorated_setting.setting.min, decorated_setting.setting.max,
+        decorated_setting.setting.on_error.behavior,
+        decorated_setting.setting.on_error.code,)
     i += 1
-  print >>C, "{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
-    decorated_setting.name,
-    decorated_setting.setting.default,
-    decorated_setting.setting.min,
-    decorated_setting.setting.max,
-    decorated_setting.setting.on_error.behavior,
-    decorated_setting.setting.on_error.code,
-  )
-  i += 1
-print >>C, "};"
+print >> C, "};"
 
-print >>H
-print >>H, "#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */"
+print >> H
+print >> H, "#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */"
 
 H.close()
 C.close()
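
The lookup emitted above is a displacement-style perfect hash: the `perfection` parameters (offset, t, r) map each HTTP/2 wire setting id to a unique slot, and the generated grpc_wire_id_to_setting_id re-checks the slot against grpc_setting_id_to_wire_id before trusting it. A minimal Python 3 sketch of the same scheme, for reference only (parameter values in any call would come from perfection.hash_parameters, not from here):

def make_wire_id_lookup(offset, t, r, slots):
    # Mirrors hash() / the generated C above: displace the key by offset,
    # split it into (i % t, i // t), and add the per-row shift r[y].
    def wire_id_to_setting_id(wire_id):
        i = wire_id + offset
        y = i // t
        if y >= len(r):
            return None
        h = (i % t) + (r[y] or 0)
        # Validate the slot against the id->wire table, as the C code does.
        return h if 0 <= h < len(slots) and slots[h] == wire_id else None
    return wire_id_to_setting_id
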
diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py
index 7847b8e..9d99bbf 100755
--- a/tools/codegen/core/gen_static_metadata.py
+++ b/tools/codegen/core/gen_static_metadata.py
@@ -172,65 +172,66 @@
     'gzip',
 ]
 
+
 # utility: mangle the name of a config
 def mangle(elem, name=None):
-  xl = {
-      '-': '_',
-      ':': '',
-      '/': 'slash',
-      '.': 'dot',
-      ',': 'comma',
-      ' ': '_',
-  }
+    xl = {
+        '-': '_',
+        ':': '',
+        '/': 'slash',
+        '.': 'dot',
+        ',': 'comma',
+        ' ': '_',
+    }
 
-  def m0(x):
-    if not x:
-      return 'empty'
-    r = ''
-    for c in x:
-      put = xl.get(c, c.lower())
-      if not put:
-        continue
-      last_is_underscore = r[-1] == '_' if r else True
-      if last_is_underscore and put == '_':
-        continue
-      elif len(put) > 1:
-        if not last_is_underscore:
-          r += '_'
-        r += put
-        r += '_'
-      else:
-        r += put
-    if r[-1] == '_':
-      r = r[:-1]
-    return r
+    def m0(x):
+        if not x:
+            return 'empty'
+        r = ''
+        for c in x:
+            put = xl.get(c, c.lower())
+            if not put:
+                continue
+            last_is_underscore = r[-1] == '_' if r else True
+            if last_is_underscore and put == '_':
+                continue
+            elif len(put) > 1:
+                if not last_is_underscore:
+                    r += '_'
+                r += put
+                r += '_'
+            else:
+                r += put
+        if r[-1] == '_':
+            r = r[:-1]
+        return r
 
-  def n(default, name=name):
-    if name is None:
-      return 'grpc_%s_' % default
-    if name == '':
-      return ''
-    return 'grpc_%s_' % name
+    def n(default, name=name):
+        if name is None:
+            return 'grpc_%s_' % default
+        if name == '':
+            return ''
+        return 'grpc_%s_' % name
 
-  if isinstance(elem, tuple):
-    return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
-  else:
-    return '%s%s' % (n('mdstr'), m0(elem))
+    if isinstance(elem, tuple):
+        return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
+    else:
+        return '%s%s' % (n('mdstr'), m0(elem))
 
 
 # utility: generate some hash value for a string
 def fake_hash(elem):
-  return hashlib.md5(elem).hexdigest()[0:8]
+    return hashlib.md5(elem).hexdigest()[0:8]
 
 
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >> f, '/*'
-    for line in banner:
-      print >> f, ' * %s' % line
-    print >> f, ' */'
-    print >> f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
 
 
 # build a list of all the strings we need
@@ -240,43 +241,43 @@
 # put metadata batch callouts first, to make the check of if a static metadata
 # string is a callout trivial
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  if elem not in all_strs:
-    all_strs.append(elem)
-for elem in CONFIG:
-  if isinstance(elem, tuple):
-    if elem[0] not in all_strs:
-      all_strs.append(elem[0])
-    if elem[1] not in all_strs:
-      all_strs.append(elem[1])
-    if elem not in all_elems:
-      all_elems.append(elem)
-  else:
     if elem not in all_strs:
-      all_strs.append(elem)
+        all_strs.append(elem)
+for elem in CONFIG:
+    if isinstance(elem, tuple):
+        if elem[0] not in all_strs:
+            all_strs.append(elem[0])
+        if elem[1] not in all_strs:
+            all_strs.append(elem[1])
+        if elem not in all_elems:
+            all_elems.append(elem)
+    else:
+        if elem not in all_strs:
+            all_strs.append(elem)
 compression_elems = []
 for mask in range(1, 1 << len(COMPRESSION_ALGORITHMS)):
-  val = ','.join(COMPRESSION_ALGORITHMS[alg]
-                 for alg in range(0, len(COMPRESSION_ALGORITHMS))
-                 if (1 << alg) & mask)
-  elem = ('grpc-accept-encoding', val)
-  if val not in all_strs:
-    all_strs.append(val)
-  if elem not in all_elems:
-    all_elems.append(elem)
-  compression_elems.append(elem)
-  static_userdata[elem] = 1 + (mask | 1)
+    val = ','.join(COMPRESSION_ALGORITHMS[alg]
+                   for alg in range(0, len(COMPRESSION_ALGORITHMS))
+                   if (1 << alg) & mask)
+    elem = ('grpc-accept-encoding', val)
+    if val not in all_strs:
+        all_strs.append(val)
+    if elem not in all_elems:
+        all_elems.append(elem)
+    compression_elems.append(elem)
+    static_userdata[elem] = 1 + (mask | 1)
 stream_compression_elems = []
 for mask in range(1, 1 << len(STREAM_COMPRESSION_ALGORITHMS)):
-  val = ','.join(STREAM_COMPRESSION_ALGORITHMS[alg]
-                 for alg in range(0, len(STREAM_COMPRESSION_ALGORITHMS))
-                 if (1 << alg) & mask)
-  elem = ('accept-encoding', val)
-  if val not in all_strs:
-    all_strs.append(val)
-  if elem not in all_elems:
-    all_elems.append(elem)
-  stream_compression_elems.append(elem)
-  static_userdata[elem] = 1 + (mask | 1)
+    val = ','.join(STREAM_COMPRESSION_ALGORITHMS[alg]
+                   for alg in range(0, len(STREAM_COMPRESSION_ALGORITHMS))
+                   if (1 << alg) & mask)
+    elem = ('accept-encoding', val)
+    if val not in all_strs:
+        all_strs.append(val)
+    if elem not in all_elems:
+        all_elems.append(elem)
+    stream_compression_elems.append(elem)
+    static_userdata[elem] = 1 + (mask | 1)
 
 # output configuration
 args = sys.argv[1:]
@@ -284,62 +285,62 @@
 C = None
 D = None
 if args:
-  if 'header' in args:
-    H = sys.stdout
-  else:
-    H = open('/dev/null', 'w')
-  if 'source' in args:
-    C = sys.stdout
-  else:
-    C = open('/dev/null', 'w')
-  if 'dictionary' in args:
-    D = sys.stdout
-  else:
-    D = open('/dev/null', 'w')
+    if 'header' in args:
+        H = sys.stdout
+    else:
+        H = open('/dev/null', 'w')
+    if 'source' in args:
+        C = sys.stdout
+    else:
+        C = open('/dev/null', 'w')
+    if 'dictionary' in args:
+        D = sys.stdout
+    else:
+        D = open('/dev/null', 'w')
 else:
-  H = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../src/core/lib/transport/static_metadata.h'), 'w')
-  C = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../src/core/lib/transport/static_metadata.cc'), 'w')
-  D = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
+    H = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../src/core/lib/transport/static_metadata.h'), 'w')
+    C = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../src/core/lib/transport/static_metadata.cc'), 'w')
+    D = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
 
 # copy-paste copyright notice from this file
 with open(sys.argv[0]) as my_source:
-  copyright = []
-  for line in my_source:
-    if line[0] != '#':
-      break
-  for line in my_source:
-    if line[0] == '#':
-      copyright.append(line)
-      break
-  for line in my_source:
-    if line[0] != '#':
-      break
-    copyright.append(line)
-  put_banner([H, C], [line[2:].rstrip() for line in copyright])
+    copyright = []
+    for line in my_source:
+        if line[0] != '#':
+            break
+    for line in my_source:
+        if line[0] == '#':
+            copyright.append(line)
+            break
+    for line in my_source:
+        if line[0] != '#':
+            break
+        copyright.append(line)
+    put_banner([H, C], [line[2:].rstrip() for line in copyright])
 
 hex_bytes = [ord(c) for c in 'abcdefABCDEF0123456789']
 
 
 def esc_dict(line):
-  out = "\""
-  for c in line:
-    if 32 <= c < 127:
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-    else:
-      out += '\\x%02X' % c
-  return out + "\""
+    out = "\""
+    for c in line:
+        if 32 <= c < 127:
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+        else:
+            out += '\\x%02X' % c
+    return out + "\""
 
 
 put_banner([H, C], """WARNING: Auto-generated code.
@@ -347,17 +348,13 @@
 To make changes to this file, change
 tools/codegen/core/gen_static_metadata.py, and then re-run it.
 
-See metadata.h for an explanation of the interface here, and metadata.c for
+See metadata.h for an explanation of the interface here, and metadata.cc for
 an explanation of what's going on.
 """.splitlines())
 
 print >> H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
 print >> H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
 print >> H
-print >> H, '#ifdef __cplusplus'
-print >> H, 'extern "C" {'
-print >> H, '#endif'
-print >> H
 print >> H, '#include "src/core/lib/transport/metadata.h"'
 print >> H
 
@@ -369,27 +366,26 @@
 str_ofs = 0
 id2strofs = {}
 for i, elem in enumerate(all_strs):
-  id2strofs[i] = str_ofs
-  str_ofs += len(elem)
+    id2strofs[i] = str_ofs
+    str_ofs += len(elem)
 
 
 def slice_def(i):
-  return ('{&grpc_static_metadata_refcounts[%d],'
-          ' {{g_bytes+%d, %d}}}') % (
-      i, id2strofs[i], len(all_strs[i]))
+    return ('{&grpc_static_metadata_refcounts[%d],'
+            ' {{g_bytes+%d, %d}}}') % (i, id2strofs[i], len(all_strs[i]))
 
 
 # validate configuration
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  assert elem in all_strs
+    assert elem in all_strs
 
 print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
 print >> H, ('extern const grpc_slice '
              'grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];')
 for i, elem in enumerate(all_strs):
-  print >> H, '/* "%s" */' % elem
-  print >> H, '#define %s (grpc_static_slice_table[%d])' % (
-      mangle(elem).upper(), i)
+    print >> H, '/* "%s" */' % elem
+    print >> H, '#define %s (grpc_static_slice_table[%d])' % (
+        mangle(elem).upper(), i)
 print >> H
 print >> C, 'static uint8_t g_bytes[] = {%s};' % (
     ','.join('%d' % ord(c) for c in ''.join(all_strs)))
@@ -411,7 +407,7 @@
 print >> C, ('grpc_slice_refcount '
              'grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {')
 for i, elem in enumerate(all_strs):
-  print >> C, '  {&grpc_static_metadata_vtable, &static_sub_refcnt},'
+    print >> C, '  {&grpc_static_metadata_vtable, &static_sub_refcnt},'
 print >> C, '};'
 print >> C
 print >> H, '#define GRPC_IS_STATIC_METADATA_STRING(slice) \\'
@@ -421,7 +417,7 @@
 print >> C, ('const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]'
              ' = {')
 for i, elem in enumerate(all_strs):
-  print >> C, slice_def(i) + ','
+    print >> C, slice_def(i) + ','
 print >> C, '};'
 print >> C
 print >> H, '#define GRPC_STATIC_METADATA_INDEX(static_slice) \\'
@@ -431,10 +427,10 @@
 
 print >> D, '# hpack fuzzing dictionary'
 for i, elem in enumerate(all_strs):
-  print >> D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
+    print >> D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
 for i, elem in enumerate(all_elems):
-  print >> D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
-                               [len(elem[1])] + [ord(c) for c in elem[1]]))
+    print >> D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
+                                 [len(elem[1])] + [ord(c) for c in elem[1]]))
 
 print >> H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
 print >> H, ('extern grpc_mdelem_data '
@@ -442,10 +438,9 @@
 print >> H, ('extern uintptr_t '
              'grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];')
 for i, elem in enumerate(all_elems):
-  print >> H, '/* "%s": "%s" */' % elem
-  print >> H, ('#define %s (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[%d], '
-               'GRPC_MDELEM_STORAGE_STATIC))') % (
-      mangle(elem).upper(), i)
+    print >> H, '/* "%s": "%s" */' % elem
+    print >> H, ('#define %s (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[%d], '
+                 'GRPC_MDELEM_STORAGE_STATIC))') % (mangle(elem).upper(), i)
 print >> H
 print >> C, ('uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] '
              '= {')
@@ -456,42 +451,38 @@
 
 
 def str_idx(s):
-  for i, s2 in enumerate(all_strs):
-    if s == s2:
-      return i
+    for i, s2 in enumerate(all_strs):
+        if s == s2:
+            return i
 
 
 def md_idx(m):
-  for i, m2 in enumerate(all_elems):
-    if m == m2:
-      return i
+    for i, m2 in enumerate(all_elems):
+        if m == m2:
+            return i
 
 
 def offset_trials(mink):
-  yield 0
-  for i in range(1, 100):
-    for mul in [-1, 1]:
-      yield mul * i
+    yield 0
+    for i in range(1, 100):
+        for mul in [-1, 1]:
+            yield mul * i
 
 
 def perfect_hash(keys, name):
-  p = perfection.hash_parameters(keys)
+    p = perfection.hash_parameters(keys)
 
-  def f(i, p=p):
-    i += p.offset
-    x = i % p.t
-    y = i / p.t
-    return x + p.r[y]
+    def f(i, p=p):
+        i += p.offset
+        x = i % p.t
+        y = i / p.t
+        return x + p.r[y]
 
-  return {
-      'PHASHRANGE':
-          p.t - 1 + max(p.r),
-      'PHASHNKEYS':
-          len(p.slots),
-      'pyfunc':
-          f,
-      'code':
-          """
+    return {
+        'PHASHRANGE': p.t - 1 + max(p.r),
+        'PHASHNKEYS': len(p.slots),
+        'pyfunc': f,
+        'code': """
 static const int8_t %(name)s_r[] = {%(r)s};
 static uint32_t %(name)s_phash(uint32_t i) {
   i %(offset_sign)s= %(offset)d;
@@ -505,13 +496,13 @@
   return h;
 }
     """ % {
-        'name': name,
-        'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
-        't': p.t,
-        'offset': abs(p.offset),
-        'offset_sign': '+' if p.offset > 0 else '-'
+            'name': name,
+            'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
+            't': p.t,
+            'offset': abs(p.offset),
+            'offset_sign': '+' if p.offset > 0 else '-'
+        }
     }
-  }
 
 
 elem_keys = [
@@ -523,14 +514,14 @@
 keys = [0] * int(elem_hash['PHASHRANGE'])
 idxs = [255] * int(elem_hash['PHASHNKEYS'])
 for i, k in enumerate(elem_keys):
-  h = elem_hash['pyfunc'](k)
-  assert keys[h] == 0
-  keys[h] = k
-  idxs[h] = i
+    h = elem_hash['pyfunc'](k)
+    assert keys[h] == 0
+    keys[h] = k
+    idxs[h] = i
 print >> C, 'static const uint16_t elem_keys[] = {%s};' % ','.join(
     '%d' % k for k in keys)
-print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join(
-    '%d' % i for i in idxs)
+print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join('%d' % i
+                                                                  for i in idxs)
 print >> C
 
 print >> H, 'grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);'
@@ -544,12 +535,12 @@
 
 print >> C, 'grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {'
 for a, b in all_elems:
-  print >> C, '{%s,%s},' % (slice_def(str_idx(a)), slice_def(str_idx(b)))
+    print >> C, '{%s,%s},' % (slice_def(str_idx(a)), slice_def(str_idx(b)))
 print >> C, '};'
 
 print >> H, 'typedef enum {'
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  print >> H, '  %s,' % mangle(elem, 'batch').upper()
+    print >> H, '  %s,' % mangle(elem, 'batch').upper()
 print >> H, '  GRPC_BATCH_CALLOUTS_COUNT'
 print >> H, '} grpc_metadata_batch_callouts_index;'
 print >> H
@@ -557,7 +548,7 @@
 print >> H, '  struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];'
 print >> H, '  struct {'
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  print >> H, '  struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
+    print >> H, '  struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
 print >> H, '  } named;'
 print >> H, '} grpc_metadata_batch_callouts;'
 print >> H
@@ -569,7 +560,7 @@
 print >> H
 print >> C, 'bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {'
 for elem, is_default in METADATA_BATCH_CALLOUTS:
-  print >> C, '  %s, // %s' % (str(is_default).lower(), elem)
+    print >> C, '  %s, // %s' % (str(is_default).lower(), elem)
 print >> C, '};'
 print >> C
 
@@ -588,15 +579,12 @@
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
 print >> C, 'const uint8_t grpc_static_accept_stream_encoding_metadata[%d] = {' % (
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
-print >> C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in stream_compression_elems)
+print >> C, '0,%s' % ','.join('%d' % md_idx(elem)
+                              for elem in stream_compression_elems)
 print >> C, '};'
 
 print >> H, '#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[grpc_static_accept_stream_encoding_metadata[(algs)]], GRPC_MDELEM_STORAGE_STATIC))'
 
-print >> H, '#ifdef __cplusplus'
-print >> H, '}'
-print >> H, '#endif'
-
 print >> H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
 
 H.close()
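
gen_static_metadata.py also writes an hpack fuzzing dictionary through esc_dict(): printable ASCII bytes pass through (with the double quote escaped) and everything else becomes a \xHH escape. A rough Python 3 equivalent of that escaping, shown only to illustrate the dictionary format:

def esc_dict(data):
    # data is a list of byte values, e.g. [len(s)] + [ord(c) for c in s]
    out = '"'
    for c in data:
        if 32 <= c < 127 and c != ord('"'):
            out += chr(c)
        elif c == ord('"'):
            out += '\\"'
        else:
            out += '\\x%02X' % c
    return out + '"'

# esc_dict([4] + [ord(ch) for ch in 'path']) produces "\x04path" (quotes included).
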
diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py
index d439d99..93f2035 100755
--- a/tools/codegen/core/gen_stats_data.py
+++ b/tools/codegen/core/gen_stats_data.py
@@ -22,397 +22,436 @@
 import json
 
 with open('src/core/lib/debug/stats_data.yaml') as f:
-  attrs = yaml.load(f.read())
+    attrs = yaml.load(f.read())
 
 REQUIRED_FIELDS = ['name', 'doc']
 
+
 def make_type(name, fields):
-  return (collections.namedtuple(name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
+    return (collections.namedtuple(
+        name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
+
 
 def c_str(s, encoding='ascii'):
-   if isinstance(s, unicode):
-      s = s.encode(encoding)
-   result = ''
-   for c in s:
-      if not (32 <= ord(c) < 127) or c in ('\\', '"'):
-         result += '\\%03o' % ord(c)
-      else:
-         result += c
-   return '"' + result + '"'
+    if isinstance(s, unicode):
+        s = s.encode(encoding)
+    result = ''
+    for c in s:
+        if not (32 <= ord(c) < 127) or c in ('\\', '"'):
+            result += '\\%03o' % ord(c)
+        else:
+            result += c
+    return '"' + result + '"'
 
-types = (
-  make_type('Counter', []),
-  make_type('Histogram', ['max', 'buckets']),
-)
+
+types = (make_type('Counter', []), make_type('Histogram', ['max', 'buckets']),)
 
 inst_map = dict((t[0].__name__, t[1]) for t in types)
 
 stats = []
 
 for attr in attrs:
-  found = False
-  for t, lst in types:
-    t_name = t.__name__.lower()
-    if t_name in attr:
-      name = attr[t_name]
-      del attr[t_name]
-      lst.append(t(name=name, **attr))
-      found = True
-      break
-  assert found, "Bad decl: %s" % attr
+    found = False
+    for t, lst in types:
+        t_name = t.__name__.lower()
+        if t_name in attr:
+            name = attr[t_name]
+            del attr[t_name]
+            lst.append(t(name=name, **attr))
+            found = True
+            break
+    assert found, "Bad decl: %s" % attr
+
 
 def dbl2u64(d):
-  return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
+    return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
+
 
 def shift_works_until(mapped_bounds, shift_bits):
-  for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
-    a, b = ab
-    if (a >> shift_bits) == (b >> shift_bits):
-      return i
-  return len(mapped_bounds)
+    for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
+        a, b = ab
+        if (a >> shift_bits) == (b >> shift_bits):
+            return i
+    return len(mapped_bounds)
+
 
 def find_ideal_shift(mapped_bounds, max_size):
-  best = None
-  for shift_bits in reversed(range(0,64)):
-    n = shift_works_until(mapped_bounds, shift_bits)
-    if n == 0: continue
-    table_size = mapped_bounds[n-1] >> shift_bits
-    if table_size > max_size: continue
-    if table_size > 65535: continue
-    if best is None:
-      best = (shift_bits, n, table_size)
-    elif best[1] < n:
-      best = (shift_bits, n, table_size)
-  print best
-  return best
+    best = None
+    for shift_bits in reversed(range(0, 64)):
+        n = shift_works_until(mapped_bounds, shift_bits)
+        if n == 0: continue
+        table_size = mapped_bounds[n - 1] >> shift_bits
+        if table_size > max_size: continue
+        if table_size > 65535: continue
+        if best is None:
+            best = (shift_bits, n, table_size)
+        elif best[1] < n:
+            best = (shift_bits, n, table_size)
+    print best
+    return best
+
 
 def gen_map_table(mapped_bounds, shift_data):
-  tbl = []
-  cur = 0
-  print mapped_bounds
-  mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
-  print mapped_bounds
-  for i in range(0, mapped_bounds[shift_data[1]-1]):
-    while i > mapped_bounds[cur]:
-      cur += 1
-    tbl.append(cur)
-  return tbl
+    tbl = []
+    cur = 0
+    print mapped_bounds
+    mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
+    print mapped_bounds
+    for i in range(0, mapped_bounds[shift_data[1] - 1]):
+        while i > mapped_bounds[cur]:
+            cur += 1
+        tbl.append(cur)
+    return tbl
+
 
 static_tables = []
 
+
 def decl_static_table(values, type):
-  global static_tables
-  v = (type, values)
-  for i, vp in enumerate(static_tables):
-    if v == vp: return i
-  print "ADD TABLE: %s %r" % (type, values)
-  r = len(static_tables)
-  static_tables.append(v)
-  return r
+    global static_tables
+    v = (type, values)
+    for i, vp in enumerate(static_tables):
+        if v == vp: return i
+    print "ADD TABLE: %s %r" % (type, values)
+    r = len(static_tables)
+    static_tables.append(v)
+    return r
+
 
 def type_for_uint_table(table):
-  mv = max(table)
-  if mv < 2**8:
-    return 'uint8_t'
-  elif mv < 2**16:
-    return 'uint16_t'
-  elif mv < 2**32:
-    return 'uint32_t'
-  else:
-    return 'uint64_t'
+    mv = max(table)
+    if mv < 2**8:
+        return 'uint8_t'
+    elif mv < 2**16:
+        return 'uint16_t'
+    elif mv < 2**32:
+        return 'uint32_t'
+    else:
+        return 'uint64_t'
+
 
 def gen_bucket_code(histogram):
-  bounds = [0, 1]
-  done_trivial = False
-  done_unmapped = False
-  first_nontrivial = None
-  first_unmapped = None
-  while len(bounds) < histogram.buckets + 1:
-    if len(bounds) == histogram.buckets:
-      nextb = int(histogram.max)
+    bounds = [0, 1]
+    done_trivial = False
+    done_unmapped = False
+    first_nontrivial = None
+    first_unmapped = None
+    while len(bounds) < histogram.buckets + 1:
+        if len(bounds) == histogram.buckets:
+            nextb = int(histogram.max)
+        else:
+            mul = math.pow(
+                float(histogram.max) / bounds[-1],
+                1.0 / (histogram.buckets + 1 - len(bounds)))
+            nextb = int(math.ceil(bounds[-1] * mul))
+        if nextb <= bounds[-1] + 1:
+            nextb = bounds[-1] + 1
+        elif not done_trivial:
+            done_trivial = True
+            first_nontrivial = len(bounds)
+        bounds.append(nextb)
+    bounds_idx = decl_static_table(bounds, 'int')
+    if done_trivial:
+        first_nontrivial_code = dbl2u64(first_nontrivial)
+        code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
+        shift_data = find_ideal_shift(code_bounds[first_nontrivial:],
+                                      256 * histogram.buckets)
+    #print first_nontrivial, shift_data, bounds
+    #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
+    code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
+    map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
+    if first_nontrivial is None:
+        code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
+                 histogram.name.upper())
     else:
-      mul = math.pow(float(histogram.max) / bounds[-1],
-                     1.0 / (histogram.buckets + 1 - len(bounds)))
-      nextb = int(math.ceil(bounds[-1] * mul))
-    if nextb <= bounds[-1] + 1:
-      nextb = bounds[-1] + 1
-    elif not done_trivial:
-      done_trivial = True
-      first_nontrivial = len(bounds)
-    bounds.append(nextb)
-  bounds_idx = decl_static_table(bounds, 'int')
-  if done_trivial:
-    first_nontrivial_code = dbl2u64(first_nontrivial)
-    code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
-    shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets)
-  #print first_nontrivial, shift_data, bounds
-  #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
-  code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
-  map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
-  if first_nontrivial is None:
-    code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n'
-             % histogram.name.upper())
-  else:
-    code += 'if (value < %d) {\n' % first_nontrivial
-    code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n'
-             % histogram.name.upper())
-    code += 'return;\n'
-    code += '}'
-    first_nontrivial_code = dbl2u64(first_nontrivial)
-    if shift_data is not None:
-      map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table))
-      code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
-      code += '_val.dbl = value;\n'
-      code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
-      code += 'int bucket = '
-      code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial)
-      code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
-      code += 'bucket -= (_val.uint < _bkt.uint);\n'
-      code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper()
-      code += 'return;\n'
-      code += '}\n'
-    code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper()
-    code += 'grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets)
-  return (code, bounds_idx)
+        code += 'if (value < %d) {\n' % first_nontrivial
+        code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
+                 histogram.name.upper())
+        code += 'return;\n'
+        code += '}'
+        first_nontrivial_code = dbl2u64(first_nontrivial)
+        if shift_data is not None:
+            map_table_idx = decl_static_table(map_table,
+                                              type_for_uint_table(map_table))
+            code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
+            code += '_val.dbl = value;\n'
+            code += 'if (_val.uint < %dull) {\n' % (
+                (map_table[-1] << shift_data[0]) + first_nontrivial_code)
+            code += 'int bucket = '
+            code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (
+                map_table_idx, first_nontrivial_code, shift_data[0],
+                first_nontrivial)
+            code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
+            code += 'bucket -= (_val.uint < _bkt.uint);\n'
+            code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper(
+            )
+            code += 'return;\n'
+            code += '}\n'
+        code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper(
+        )
+        code += 'grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_%d, %d));\n' % (
+            bounds_idx, histogram.buckets)
+    return (code, bounds_idx)
+
 
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >>f, '/*'
-    for line in banner:
-      print >>f, ' * %s' % line
-    print >>f, ' */'
-    print >>f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
+
 
 with open('src/core/lib/debug/stats_data.h', 'w') as H:
-  # copy-paste copyright notice from this file
-  with open(sys.argv[0]) as my_source:
-    copyright = []
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        copyright.append(line)
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      copyright.append(line)
-    put_banner([H], [line[2:].rstrip() for line in copyright])
+    # copy-paste copyright notice from this file
+    with open(sys.argv[0]) as my_source:
+        copyright = []
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                copyright.append(line)
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            copyright.append(line)
+        put_banner([H], [line[2:].rstrip() for line in copyright])
 
-  put_banner([H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+    put_banner(
+        [H],
+        ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
 
-  print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
-  print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
-  print >>H
-  print >>H, "#include <inttypes.h>"
-  print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
-  print >>H
-  print >>H, "#ifdef __cplusplus"
-  print >>H, "extern \"C\" {"
-  print >>H, "#endif"
-  print >>H
+    print >> H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+    print >> H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+    print >> H
+    print >> H, "#include <inttypes.h>"
+    print >> H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+    print >> H
+    print >> H, "#ifdef __cplusplus"
+    print >> H, "extern \"C\" {"
+    print >> H, "#endif"
+    print >> H
 
-  for typename, instances in sorted(inst_map.items()):
-    print >>H, "typedef enum {"
-    for inst in instances:
-      print >>H, "  GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper())
-    print >>H, "  GRPC_STATS_%s_COUNT" % (typename.upper())
-    print >>H, "} grpc_stats_%ss;" % (typename.lower())
-    print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
-        typename.lower(), typename.upper())
-    print >>H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
-        typename.lower(), typename.upper())
+    for typename, instances in sorted(inst_map.items()):
+        print >> H, "typedef enum {"
+        for inst in instances:
+            print >> H, "  GRPC_STATS_%s_%s," % (typename.upper(),
+                                                 inst.name.upper())
+        print >> H, "  GRPC_STATS_%s_COUNT" % (typename.upper())
+        print >> H, "} grpc_stats_%ss;" % (typename.lower())
+        print >> H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
+            typename.lower(), typename.upper())
+        print >> H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
+            typename.lower(), typename.upper())
 
-  histo_start = []
-  histo_buckets = []
-  histo_bucket_boundaries = []
+    histo_start = []
+    histo_buckets = []
+    histo_bucket_boundaries = []
 
-  print >>H, "typedef enum {"
-  first_slot = 0
-  for histogram in inst_map['Histogram']:
-    histo_start.append(first_slot)
-    histo_buckets.append(histogram.buckets)
-    print >>H, "  GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (histogram.name.upper(), first_slot)
-    print >>H, "  GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets)
-    first_slot += histogram.buckets
-  print >>H, "  GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
-  print >>H, "} grpc_stats_histogram_constants;"
+    print >> H, "typedef enum {"
+    first_slot = 0
+    for histogram in inst_map['Histogram']:
+        histo_start.append(first_slot)
+        histo_buckets.append(histogram.buckets)
+        print >> H, "  GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (
+            histogram.name.upper(), first_slot)
+        print >> H, "  GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (
+            histogram.name.upper(), histogram.buckets)
+        first_slot += histogram.buckets
+    print >> H, "  GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
+    print >> H, "} grpc_stats_histogram_constants;"
 
-  for ctr in inst_map['Counter']:
-    print >>H, ("#define GRPC_STATS_INC_%s() " +
-                "GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") % (
-                ctr.name.upper(), ctr.name.upper())
-  for histogram in inst_map['Histogram']:
-    print >>H, "#define GRPC_STATS_INC_%s(value) grpc_stats_inc_%s( (int)(value))" % (
-        histogram.name.upper(), histogram.name.lower())
-    print >>H, "void grpc_stats_inc_%s(int x);" % histogram.name.lower()
+    for ctr in inst_map['Counter']:
+        print >> H, ("#define GRPC_STATS_INC_%s() " +
+                     "GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") % (
+                         ctr.name.upper(), ctr.name.upper())
+    for histogram in inst_map['Histogram']:
+        print >> H, "#define GRPC_STATS_INC_%s(value) grpc_stats_inc_%s( (int)(value))" % (
+            histogram.name.upper(), histogram.name.lower())
+        print >> H, "void grpc_stats_inc_%s(int x);" % histogram.name.lower()
 
-  for i, tbl in enumerate(static_tables):
-    print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1]))
+    for i, tbl in enumerate(static_tables):
+        print >> H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i,
+                                                                  len(tbl[1]))
 
-  print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(int x);" % len(inst_map['Histogram'])
+    print >> H, "extern const int grpc_stats_histo_buckets[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern const int grpc_stats_histo_start[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern void (*const grpc_stats_inc_histogram[%d])(int x);" % len(
+        inst_map['Histogram'])
 
-  print >>H
-  print >>H, "#ifdef __cplusplus"
-  print >>H, "}"
-  print >>H, "#endif"
-  print >>H
-  print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
+    print >> H
+    print >> H, "#ifdef __cplusplus"
+    print >> H, "}"
+    print >> H, "#endif"
+    print >> H
+    print >> H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
 
 with open('src/core/lib/debug/stats_data.cc', 'w') as C:
-  # copy-paste copyright notice from this file
-  with open(sys.argv[0]) as my_source:
-    copyright = []
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        copyright.append(line)
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      copyright.append(line)
-    put_banner([C], [line[2:].rstrip() for line in copyright])
+    # copy-paste copyright notice from this file
+    with open(sys.argv[0]) as my_source:
+        copyright = []
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                copyright.append(line)
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            copyright.append(line)
+        put_banner([C], [line[2:].rstrip() for line in copyright])
 
-  put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+    put_banner(
+        [C],
+        ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
 
-  print >>C, "#include \"src/core/lib/debug/stats_data.h\""
-  print >>C, "#include \"src/core/lib/debug/stats.h\""
-  print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
-  print >>C, "#include <grpc/support/useful.h>"
+    print >> C, "#include \"src/core/lib/debug/stats_data.h\""
+    print >> C, "#include \"src/core/lib/debug/stats.h\""
+    print >> C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+    print >> C, "#include <grpc/support/useful.h>"
 
-  histo_code = []
-  for histogram in inst_map['Histogram']:
-    code, bounds_idx = gen_bucket_code(histogram)
-    histo_bucket_boundaries.append(bounds_idx)
-    histo_code.append(code)
+    histo_code = []
+    for histogram in inst_map['Histogram']:
+        code, bounds_idx = gen_bucket_code(histogram)
+        histo_bucket_boundaries.append(bounds_idx)
+        histo_code.append(code)
 
-  for typename, instances in sorted(inst_map.items()):
-    print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
-        typename.lower(), typename.upper())
-    for inst in instances:
-      print >>C, "  %s," % c_str(inst.name)
-    print >>C, "};"
-    print >>C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
-        typename.lower(), typename.upper())
-    for inst in instances:
-      print >>C, "  %s," % c_str(inst.doc)
-    print >>C, "};"
+    for typename, instances in sorted(inst_map.items()):
+        print >> C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
+            typename.lower(), typename.upper())
+        for inst in instances:
+            print >> C, "  %s," % c_str(inst.name)
+        print >> C, "};"
+        print >> C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
+            typename.lower(), typename.upper())
+        for inst in instances:
+            print >> C, "  %s," % c_str(inst.doc)
+        print >> C, "};"
 
-  for i, tbl in enumerate(static_tables):
-    print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
-        tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
+    for i, tbl in enumerate(static_tables):
+        print >> C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
+            tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
 
-  for histogram, code in zip(inst_map['Histogram'], histo_code):
-    print >>C, ("void grpc_stats_inc_%s(int value) {%s}") % (
-                histogram.name.lower(),
-                code)
+    for histogram, code in zip(inst_map['Histogram'], histo_code):
+        print >> C, ("void grpc_stats_inc_%s(int value) {%s}") % (
+            histogram.name.lower(), code)
 
-  print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
-  print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
-  print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
-  print >>C, "void (*const grpc_stats_inc_histogram[%d])(int x) = {%s};" % (
-      len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram']))
+    print >> C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
+    print >> C, "const int grpc_stats_histo_start[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
+    print >> C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x
+                                             for x in histo_bucket_boundaries))
+    print >> C, "void (*const grpc_stats_inc_histogram[%d])(int x) = {%s};" % (
+        len(inst_map['Histogram']), ','.join(
+            'grpc_stats_inc_%s' % histogram.name.lower()
+            for histogram in inst_map['Histogram']))
 
 # patch qps_test bigquery schema
 RECORD_EXPLICIT_PERCENTILES = [50, 95, 99]
 
 with open('tools/run_tests/performance/scenario_result_schema.json', 'r') as f:
-  qps_schema = json.loads(f.read())
+    qps_schema = json.loads(f.read())
+
 
 def FindNamed(js, name):
-  for el in js:
-    if el['name'] == name:
-      return el
+    for el in js:
+        if el['name'] == name:
+            return el
+
 
 def RemoveCoreFields(js):
-  new_fields = []
-  for field in js['fields']:
-    if not field['name'].startswith('core_'):
-      new_fields.append(field)
-  js['fields'] = new_fields
+    new_fields = []
+    for field in js['fields']:
+        if not field['name'].startswith('core_'):
+            new_fields.append(field)
+    js['fields'] = new_fields
+
 
 RemoveCoreFields(FindNamed(qps_schema, 'clientStats'))
 RemoveCoreFields(FindNamed(qps_schema, 'serverStats'))
 
+
 def AddCoreFields(js):
-  for counter in inst_map['Counter']:
-    js['fields'].append({
-      'name': 'core_%s' % counter.name,
-      'type': 'INTEGER',
-      'mode': 'NULLABLE'
-    })
-  for histogram in inst_map['Histogram']:
-    js['fields'].append({
-      'name': 'core_%s' % histogram.name,
-      'type': 'STRING',
-      'mode': 'NULLABLE'
-    })
-    js['fields'].append({
-      'name': 'core_%s_bkts' % histogram.name,
-      'type': 'STRING',
-      'mode': 'NULLABLE'
-    })
-    for pctl in RECORD_EXPLICIT_PERCENTILES:
-      js['fields'].append({
-        'name': 'core_%s_%dp' % (histogram.name, pctl),
-        'type': 'FLOAT',
-        'mode': 'NULLABLE'
-      })
+    for counter in inst_map['Counter']:
+        js['fields'].append({
+            'name': 'core_%s' % counter.name,
+            'type': 'INTEGER',
+            'mode': 'NULLABLE'
+        })
+    for histogram in inst_map['Histogram']:
+        js['fields'].append({
+            'name': 'core_%s' % histogram.name,
+            'type': 'STRING',
+            'mode': 'NULLABLE'
+        })
+        js['fields'].append({
+            'name': 'core_%s_bkts' % histogram.name,
+            'type': 'STRING',
+            'mode': 'NULLABLE'
+        })
+        for pctl in RECORD_EXPLICIT_PERCENTILES:
+            js['fields'].append({
+                'name': 'core_%s_%dp' % (histogram.name, pctl),
+                'type': 'FLOAT',
+                'mode': 'NULLABLE'
+            })
+
 
 AddCoreFields(FindNamed(qps_schema, 'clientStats'))
 AddCoreFields(FindNamed(qps_schema, 'serverStats'))
 
 with open('tools/run_tests/performance/scenario_result_schema.json', 'w') as f:
-  f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
+    f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
 
 # and generate a helper script to massage scenario results into the format we'd
 # like to query
 with open('tools/run_tests/performance/massage_qps_stats.py', 'w') as P:
-  with open(sys.argv[0]) as my_source:
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        print >>P, line.rstrip()
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      print >>P, line.rstrip()
+    with open(sys.argv[0]) as my_source:
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                print >> P, line.rstrip()
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            print >> P, line.rstrip()
 
-  print >>P
-  print >>P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
-  print >>P
+    print >> P
+    print >> P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
+    print >> P
 
-  print >>P, 'import massage_qps_stats_helpers'
+    print >> P, 'import massage_qps_stats_helpers'
 
-  print >>P, 'def massage_qps_stats(scenario_result):'
-  print >>P, '  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
-  print >>P, '    if "coreStats" not in stats: return'
-  print >>P, '    core_stats = stats["coreStats"]'
-  print >>P, '    del stats["coreStats"]'
-  for counter in inst_map['Counter']:
-    print >>P, '    stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (counter.name, counter.name)
-  for i, histogram in enumerate(inst_map['Histogram']):
-    print >>P, '    h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
-    print >>P, '    stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
-    print >>P, '    stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
-    for pctl in RECORD_EXPLICIT_PERCENTILES:
-      print >>P, '    stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
-          histogram.name, pctl, pctl)
+    print >> P, 'def massage_qps_stats(scenario_result):'
+    print >> P, '  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
+    print >> P, '    if "coreStats" not in stats: return'
+    print >> P, '    core_stats = stats["coreStats"]'
+    print >> P, '    del stats["coreStats"]'
+    for counter in inst_map['Counter']:
+        print >> P, '    stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (
+            counter.name, counter.name)
+    for i, histogram in enumerate(inst_map['Histogram']):
+        print >> P, '    h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
+        print >> P, '    stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
+        print >> P, '    stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
+        for pctl in RECORD_EXPLICIT_PERCENTILES:
+            print >> P, '    stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
+                histogram.name, pctl, pctl)
 
 with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S:
-  columns = []
-  for counter in inst_map['Counter']:
-    columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
-  print >>S, ',\n'.join('%s:%s' % x for x in columns)
-
+    columns = []
+    for counter in inst_map['Counter']:
+        columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
+    print >> S, ',\n'.join('%s:%s' % x for x in columns)
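
The gen_bucket_code() logic in the hunk above lays out histogram buckets roughly geometrically between 1 and the configured max, falling back to +1 steps whenever the geometric step would be smaller than one. A standalone Python 3 sketch of just that bound computation (same recurrence, no code generation):

import math

def histogram_bounds(max_value, buckets):
    bounds = [0, 1]
    while len(bounds) < buckets + 1:
        if len(bounds) == buckets:
            nextb = int(max_value)
        else:
            mul = math.pow(float(max_value) / bounds[-1],
                           1.0 / (buckets + 1 - len(bounds)))
            nextb = int(math.ceil(bounds[-1] * mul))
        if nextb <= bounds[-1] + 1:
            nextb = bounds[-1] + 1
        bounds.append(nextb)
    return bounds

# e.g. histogram_bounds(1024, 8) -> [0, 1, 3, 8, 22, 58, 152, 395, 1024]
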
diff --git a/tools/debug/core/chttp2_ref_leak.py b/tools/debug/core/chttp2_ref_leak.py
index d693dd9..a6a5448 100755
--- a/tools/debug/core/chttp2_ref_leak.py
+++ b/tools/debug/core/chttp2_ref_leak.py
@@ -20,8 +20,10 @@
 import sys
 import re
 
+
 def new_obj():
-  return ['destroy']
+    return ['destroy']
+
 
 outstanding = collections.defaultdict(new_obj)
 
@@ -29,14 +31,14 @@
 # chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]
 
 for line in sys.stdin:
-  m = re.search(r'chttp2:(  ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
-  if m:
-    if m.group(1) == '  ref':
-      outstanding[m.group(2)].append(m.group(3))
-    else:
-      outstanding[m.group(2)].remove(m.group(3))
+    m = re.search(
+        r'chttp2:(  ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
+    if m:
+        if m.group(1) == '  ref':
+            outstanding[m.group(2)].append(m.group(3))
+        else:
+            outstanding[m.group(2)].remove(m.group(3))
 
 for obj, remaining in outstanding.items():
-  if remaining:
-    print 'LEAKED: %s %r' % (obj, remaining)
-
+    if remaining:
+        print 'LEAKED: %s %r' % (obj, remaining)
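
chttp2_ref_leak.py pairs every "  ref" log line with a matching "unref" per object address; whatever reasons remain unmatched at end of input are reported as leaks. A self-contained Python 3 sketch of that bookkeeping, using a hypothetical log line of the format quoted in the script's comment:

import collections
import re

PATTERN = re.compile(
    r'chttp2:(  ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]')

def find_leaks(lines):
    # Each object starts with a pending 'destroy'; refs push reasons, unrefs pop them.
    outstanding = collections.defaultdict(lambda: ['destroy'])
    for line in lines:
        m = PATTERN.search(line)
        if not m:
            continue
        if m.group(1) == '  ref':
            outstanding[m.group(2)].append(m.group(3))
        else:
            outstanding[m.group(2)].remove(m.group(3))
    return {obj: rest for obj, rest in outstanding.items() if rest}

# find_leaks(['chttp2:  ref:0xdeadbeef 1->2 reading [chttp2_transport.cc:100]'])
# reports 0xdeadbeef as leaked, since the matching unref never arrives.
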
diff --git a/tools/debug/core/error_ref_leak.py b/tools/debug/core/error_ref_leak.py
index 6582328..7806338 100644
--- a/tools/debug/core/error_ref_leak.py
+++ b/tools/debug/core/error_ref_leak.py
@@ -26,22 +26,22 @@
 
 errs = []
 for line in data:
-  # if we care about the line
-  if re.search(r'error.cc', line):
-    # str manip to cut off left part of log line
-    line = line.partition('error.cc:')[-1]
-    line = re.sub(r'\d+] ', r'', line)
-    line = line.strip().split()
-    err = line[0].strip(":")
-    if line[1] == "create":
-      assert(err not in errs)
-      errs.append(err)
-    elif line[0] == "realloc":
-      errs.remove(line[1])
-      errs.append(line[3])
-    # explicitly look for the last dereference 
-    elif line[1] == "1" and line[3] == "0":
-      assert(err in errs)
-      errs.remove(err)
+    # if we care about the line
+    if re.search(r'error.cc', line):
+        # str manip to cut off left part of log line
+        line = line.partition('error.cc:')[-1]
+        line = re.sub(r'\d+] ', r'', line)
+        line = line.strip().split()
+        err = line[0].strip(":")
+        if line[1] == "create":
+            assert (err not in errs)
+            errs.append(err)
+        elif line[0] == "realloc":
+            errs.remove(line[1])
+            errs.append(line[3])
+        # explicitly look for the last dereference 
+        elif line[1] == "1" and line[3] == "0":
+            assert (err in errs)
+            errs.remove(err)
 
 print "leaked:", errs
diff --git a/tools/distrib/c-ish/check_documentation.py b/tools/distrib/c-ish/check_documentation.py
index 24da005..fef8f4e 100755
--- a/tools/distrib/c-ish/check_documentation.py
+++ b/tools/distrib/c-ish/check_documentation.py
@@ -22,24 +22,15 @@
 
 # where do we run
 _TARGET_DIRS = [
-  'include/grpc',
-  'include/grpc++',
-  'src/core',
-  'src/cpp',
-  'test/core',
-  'test/cpp'
+    'include/grpc', 'include/grpc++', 'src/core', 'src/cpp', 'test/core',
+    'test/cpp'
 ]
 
 # which file extensions do we care about
-_INTERESTING_EXTENSIONS = [
-  '.c',
-  '.h',
-  '.cc'
-]
+_INTERESTING_EXTENSIONS = ['.c', '.h', '.cc']
 
 # find our home
-_ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
 os.chdir(_ROOT)
 
 errors = 0
@@ -47,30 +38,30 @@
 # walk directories, find things
 printed_banner = False
 for target_dir in _TARGET_DIRS:
-  for root, dirs, filenames in os.walk(target_dir):
-    if 'README.md' not in filenames:
-      if not printed_banner:
-        print 'Missing README.md'
-        print '================='
-        printed_banner = True
-      print root
-      errors += 1
+    for root, dirs, filenames in os.walk(target_dir):
+        if 'README.md' not in filenames:
+            if not printed_banner:
+                print 'Missing README.md'
+                print '================='
+                printed_banner = True
+            print root
+            errors += 1
 if printed_banner: print
 printed_banner = False
 for target_dir in _TARGET_DIRS:
-  for root, dirs, filenames in os.walk(target_dir):
-    for filename in filenames:
-      if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
-        continue
-      path = os.path.join(root, filename)
-      with open(path) as f:
-        contents = f.read()
-      if '\\file' not in contents:
-        if not printed_banner:
-          print 'Missing \\file comment'
-          print '======================'
-          printed_banner = True
-        print path
-        errors += 1
+    for root, dirs, filenames in os.walk(target_dir):
+        for filename in filenames:
+            if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
+                continue
+            path = os.path.join(root, filename)
+            with open(path) as f:
+                contents = f.read()
+            if '\\file' not in contents:
+                if not printed_banner:
+                    print 'Missing \\file comment'
+                    print '======================'
+                    printed_banner = True
+                print path
+                errors += 1
 
 assert errors == 0, 'error count = %d' % errors
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index 6ecaced..f6e7362 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -22,149 +22,135 @@
 import subprocess
 
 # find our home
-ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
 # parse command line
 argp = argparse.ArgumentParser(description='copyright checker')
-argp.add_argument('-o', '--output',
-                  default='details',
-                  choices=['list', 'details'])
-argp.add_argument('-s', '--skips',
-                  default=0,
-                  action='store_const',
-                  const=1)
-argp.add_argument('-a', '--ancient',
-                  default=0,
-                  action='store_const',
-                  const=1)
-argp.add_argument('--precommit',
-                  default=False,
-                  action='store_true')
+argp.add_argument(
+    '-o', '--output', default='details', choices=['list', 'details'])
+argp.add_argument('-s', '--skips', default=0, action='store_const', const=1)
+argp.add_argument('-a', '--ancient', default=0, action='store_const', const=1)
+argp.add_argument('--precommit', default=False, action='store_true')
 args = argp.parse_args()
 
 # open the license text
 with open('NOTICE.txt') as f:
-  LICENSE_NOTICE = f.read().splitlines()
-
+    LICENSE_NOTICE = f.read().splitlines()
 
 # license format by file extension
 # key is the file extension, value is a format string
 # that given a line of license text, returns what should
 # be in the file
 LICENSE_PREFIX = {
-  '.bat':       r'@rem\s*',
-  '.c':         r'\s*(?://|\*)\s*',
-  '.cc':        r'\s*(?://|\*)\s*',
-  '.h':         r'\s*(?://|\*)\s*',
-  '.m':         r'\s*\*\s*',
-  '.php':       r'\s*\*\s*',
-  '.js':        r'\s*\*\s*',
-  '.py':        r'#\s*',
-  '.pyx':       r'#\s*',
-  '.pxd':       r'#\s*',
-  '.pxi':       r'#\s*',
-  '.rb':        r'#\s*',
-  '.sh':        r'#\s*',
-  '.proto':     r'//\s*',
-  '.cs':        r'//\s*',
-  '.mak':       r'#\s*',
-  'Makefile':   r'#\s*',
-  'Dockerfile': r'#\s*',
-  'BUILD':      r'#\s*',
+    '.bat': r'@rem\s*',
+    '.c': r'\s*(?://|\*)\s*',
+    '.cc': r'\s*(?://|\*)\s*',
+    '.h': r'\s*(?://|\*)\s*',
+    '.m': r'\s*\*\s*',
+    '.php': r'\s*\*\s*',
+    '.js': r'\s*\*\s*',
+    '.py': r'#\s*',
+    '.pyx': r'#\s*',
+    '.pxd': r'#\s*',
+    '.pxi': r'#\s*',
+    '.rb': r'#\s*',
+    '.sh': r'#\s*',
+    '.proto': r'//\s*',
+    '.cs': r'//\s*',
+    '.mak': r'#\s*',
+    'Makefile': r'#\s*',
+    'Dockerfile': r'#\s*',
+    'BUILD': r'#\s*',
 }
 
 _EXEMPT = frozenset((
-  # Generated protocol compiler output.
-  'examples/python/helloworld/helloworld_pb2.py',
-  'examples/python/helloworld/helloworld_pb2_grpc.py',
-  'examples/python/multiplex/helloworld_pb2.py',
-  'examples/python/multiplex/helloworld_pb2_grpc.py',
-  'examples/python/multiplex/route_guide_pb2.py',
-  'examples/python/multiplex/route_guide_pb2_grpc.py',
-  'examples/python/route_guide/route_guide_pb2.py',
-  'examples/python/route_guide/route_guide_pb2_grpc.py',
+    # Generated protocol compiler output.
+    'examples/python/helloworld/helloworld_pb2.py',
+    'examples/python/helloworld/helloworld_pb2_grpc.py',
+    'examples/python/multiplex/helloworld_pb2.py',
+    'examples/python/multiplex/helloworld_pb2_grpc.py',
+    'examples/python/multiplex/route_guide_pb2.py',
+    'examples/python/multiplex/route_guide_pb2_grpc.py',
+    'examples/python/route_guide/route_guide_pb2.py',
+    'examples/python/route_guide/route_guide_pb2_grpc.py',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+    'src/cpp/server/health/health.pb.h',
+    'src/cpp/server/health/health.pb.c',
 
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
-  'src/cpp/server/health/health.pb.h',
-  'src/cpp/server/health/health.pb.c',
-
-  # An older file originally from outside gRPC.
-  'src/php/tests/bootstrap.php',
-  # census.proto copied from github
-  'tools/grpcz/census.proto',
-  # status.proto copied from googleapis
-  'src/proto/grpc/status/status.proto',
-))
-
+    # An older file originally from outside gRPC.
+    'src/php/tests/bootstrap.php',
+    # census.proto copied from github
+    'tools/grpcz/census.proto',
+    # status.proto copied from googleapis
+    'src/proto/grpc/status/status.proto',))
 
 RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) gRPC authors.'
-RE_LICENSE = dict(
-    (k, r'\n'.join(
-        LICENSE_PREFIX[k] +
-        (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
-        for line in LICENSE_NOTICE))
-     for k, v in LICENSE_PREFIX.iteritems())
+RE_LICENSE = dict((k, r'\n'.join(
+    LICENSE_PREFIX[k] + (RE_YEAR
+                         if re.search(RE_YEAR, line) else re.escape(line))
+    for line in LICENSE_NOTICE)) for k, v in LICENSE_PREFIX.iteritems())
 
 if args.precommit:
-  FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
+    FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
 else:
-  FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | ' \
-                      'grep -v ^third_party/ |' \
-                      'grep -v "\(ares_config.h\|ares_build.h\)"'
+    FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | ' \
+                        'grep -v ^third_party/ |' \
+                        'grep -v "\(ares_config.h\|ares_build.h\)"'
+
 
 def load(name):
-  with open(name) as f:
-    return f.read()
+    with open(name) as f:
+        return f.read()
+
 
 def save(name, text):
-  with open(name, 'w') as f:
-    f.write(text)
+    with open(name, 'w') as f:
+        f.write(text)
 
 
-assert(re.search(RE_LICENSE['Makefile'], load('Makefile')))
+assert (re.search(RE_LICENSE['Makefile'], load('Makefile')))
 
 
 def log(cond, why, filename):
-  if not cond: return
-  if args.output == 'details':
-    print '%s: %s' % (why, filename)
-  else:
-    print filename
+    if not cond: return
+    if args.output == 'details':
+        print '%s: %s' % (why, filename)
+    else:
+        print filename
 
 
 # scan files, validate the text
 ok = True
 filename_list = []
 try:
-  filename_list = subprocess.check_output(FILE_LIST_COMMAND,
-                                          shell=True).splitlines()
+    filename_list = subprocess.check_output(
+        FILE_LIST_COMMAND, shell=True).splitlines()
 except subprocess.CalledProcessError:
-  sys.exit(0)
+    sys.exit(0)
 
 for filename in filename_list:
-  if filename in _EXEMPT:
-    continue
-  ext = os.path.splitext(filename)[1]
-  base = os.path.basename(filename)
-  if ext in RE_LICENSE:
-    re_license = RE_LICENSE[ext]
-  elif base in RE_LICENSE:
-    re_license = RE_LICENSE[base]
-  else:
-    log(args.skips, 'skip', filename)
-    continue
-  try:
-    text = load(filename)
-  except:
-    continue
-  m = re.search(re_license, text)
-  if m:
-    pass
-  elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
-    log(1, 'copyright missing', filename)
-    ok = False
+    if filename in _EXEMPT:
+        continue
+    ext = os.path.splitext(filename)[1]
+    base = os.path.basename(filename)
+    if ext in RE_LICENSE:
+        re_license = RE_LICENSE[ext]
+    elif base in RE_LICENSE:
+        re_license = RE_LICENSE[base]
+    else:
+        log(args.skips, 'skip', filename)
+        continue
+    try:
+        text = load(filename)
+    except:
+        continue
+    m = re.search(re_license, text)
+    if m:
+        pass
+    elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
+        log(1, 'copyright missing', filename)
+        ok = False
 
 sys.exit(0 if ok else 1)
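
Note: check_copyright.py builds one regular expression per file type by prefixing every line of NOTICE.txt with that type's comment marker and loosening the copyright year. A hedged Python 3 sketch of that construction (the prefix table and notice text are abbreviated stand-ins, not the repository's full NOTICE.txt):

import re

LICENSE_PREFIX = {'.py': r'#\s*', '.cc': r'\s*(?://|\*)\s*'}  # abbreviated
LICENSE_NOTICE = [
    'Copyright 2017 gRPC authors.',
    'Licensed under the Apache License, Version 2.0 (the "License");',
]
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) gRPC authors.'

def build_license_re(prefix, notice_lines):
    """Join prefixed, escaped notice lines; the copyright line accepts any year."""
    return r'\n'.join(
        prefix + (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
        for line in notice_lines)

RE_LICENSE = {ext: build_license_re(prefix, LICENSE_NOTICE)
              for ext, prefix in LICENSE_PREFIX.items()}

sample = ('# Copyright 2015-2018 gRPC authors.\n'
          '# Licensed under the Apache License, Version 2.0 (the "License");\n')
print(bool(re.search(RE_LICENSE['.py'], sample)))  # True
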
diff --git a/tools/distrib/check_include_guards.py b/tools/distrib/check_include_guards.py
index e46f1c9..24e076a 100755
--- a/tools/distrib/check_include_guards.py
+++ b/tools/distrib/check_include_guards.py
@@ -23,136 +23,136 @@
 
 
 def build_valid_guard(fpath):
-  prefix = 'GRPC_' if not fpath.startswith('include/') else ''
-  return prefix + '_'.join(fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:])
+    prefix = 'GRPC_' if not fpath.startswith('include/') else ''
+    return prefix + '_'.join(
+        fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:])
 
 
 def load(fpath):
-  with open(fpath, 'r') as f:
-    return f.read()
+    with open(fpath, 'r') as f:
+        return f.read()
 
 
 def save(fpath, contents):
-  with open(fpath, 'w') as f:
-    f.write(contents)
+    with open(fpath, 'w') as f:
+        f.write(contents)
 
 
 class GuardValidator(object):
-  def __init__(self):
-    self.ifndef_re = re.compile(r'#ifndef ([A-Z][A-Z_1-9]*)')
-    self.define_re = re.compile(r'#define ([A-Z][A-Z_1-9]*)')
-    self.endif_c_re = re.compile(r'#endif /\* ([A-Z][A-Z_1-9]*) (?:\\ *\n *)?\*/')
-    self.endif_cpp_re = re.compile(r'#endif  // ([A-Z][A-Z_1-9]*)')
-    self.failed = False
 
-  def fail(self, fpath, regexp, fcontents, match_txt, correct, fix):
-    cpp_header = 'grpc++' in fpath
-    self.failed = True
-    invalid_guards_msg_template = (
-        '{0}: Missing preprocessor guards (RE {1}). '
-        'Please wrap your code around the following guards:\n'
-        '#ifndef {2}\n'
-        '#define {2}\n'
-        '...\n'
-        '... epic code ...\n'
-        '...\n') + ('#endif  // {2}' if cpp_header else '#endif /* {2} */')
-    if not match_txt:
-      print invalid_guards_msg_template.format(fpath, regexp.pattern,
-                                               build_valid_guard(fpath))
-      return fcontents
-
-    print ('{}: Wrong preprocessor guards (RE {}):'
-           '\n\tFound {}, expected {}').format(
-        fpath, regexp.pattern, match_txt, correct)
-    if fix:
-      print 'Fixing {}...\n'.format(fpath)
-      fixed_fcontents = re.sub(match_txt, correct, fcontents)
-      if fixed_fcontents:
+    def __init__(self):
+        self.ifndef_re = re.compile(r'#ifndef ([A-Z][A-Z_1-9]*)')
+        self.define_re = re.compile(r'#define ([A-Z][A-Z_1-9]*)')
+        self.endif_c_re = re.compile(
+            r'#endif /\* ([A-Z][A-Z_1-9]*) (?:\\ *\n *)?\*/')
+        self.endif_cpp_re = re.compile(r'#endif  // ([A-Z][A-Z_1-9]*)')
         self.failed = False
-      return fixed_fcontents
-    else:
-      print
-    return fcontents
 
-  def check(self, fpath, fix):
-    cpp_header = 'grpc++' in fpath
-    valid_guard = build_valid_guard(fpath)
+    def fail(self, fpath, regexp, fcontents, match_txt, correct, fix):
+        cpp_header = 'grpc++' in fpath
+        self.failed = True
+        invalid_guards_msg_template = (
+            '{0}: Missing preprocessor guards (RE {1}). '
+            'Please wrap your code around the following guards:\n'
+            '#ifndef {2}\n'
+            '#define {2}\n'
+            '...\n'
+            '... epic code ...\n'
+            '...\n') + ('#endif  // {2}' if cpp_header else '#endif /* {2} */')
+        if not match_txt:
+            print invalid_guards_msg_template.format(fpath, regexp.pattern,
+                                                     build_valid_guard(fpath))
+            return fcontents
 
-    fcontents = load(fpath)
-
-    match = self.ifndef_re.search(fcontents)
-    if not match:
-      print 'something drastically wrong with: %s' % fpath
-      return False # failed
-    if match.lastindex is None:
-      # No ifndef. Request manual addition with hints
-      self.fail(fpath, match.re, match.string, '', '', False)
-      return False  # failed
-
-    # Does the guard end with a '_H'?
-    running_guard = match.group(1)
-    if not running_guard.endswith('_H'):
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is it the expected one based on the file path?
-    if running_guard != valid_guard:
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is there a #define? Is it the same as the #ifndef one?
-    match = self.define_re.search(fcontents)
-    if match.lastindex is None:
-      # No define. Request manual addition with hints
-      self.fail(fpath, match.re, match.string, '', '', False)
-      return False  # failed
-
-    # Is the #define guard the same as the #ifndef guard?
-    if match.group(1) != running_guard:
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is there a properly commented #endif?
-    endif_re = self.endif_cpp_re if cpp_header else self.endif_c_re
-    flines = fcontents.rstrip().splitlines()
-    match = endif_re.search('\n'.join(flines[-2:]))
-    if not match:
-      # No endif. Check if we have the last line as just '#endif' and if so
-      # replace it with a properly commented one.
-      if flines[-1] == '#endif':
-        flines[-1] = ('#endif' +
-                      ('  // {}\n'.format(valid_guard) if cpp_header
-                       else ' /* {} */\n'.format(valid_guard)))
+        print('{}: Wrong preprocessor guards (RE {}):'
+              '\n\tFound {}, expected {}').format(fpath, regexp.pattern,
+                                                  match_txt, correct)
         if fix:
-            fcontents = '\n'.join(flines)
-            save(fpath, fcontents)
-      else:
-        # something else is wrong, bail out
-        self.fail(fpath, endif_re, flines[-1], '', '', False)
-    elif match.group(1) != running_guard:
-      # Is the #endif guard the same as the #ifndef and #define guards?
-      fcontents = self.fail(fpath, endif_re, fcontents, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
+            print 'Fixing {}...\n'.format(fpath)
+            fixed_fcontents = re.sub(match_txt, correct, fcontents)
+            if fixed_fcontents:
+                self.failed = False
+            return fixed_fcontents
+        else:
+            print
+        return fcontents
 
-    return not self.failed  # Did the check succeed? (ie, not failed)
+    def check(self, fpath, fix):
+        cpp_header = 'grpc++' in fpath
+        valid_guard = build_valid_guard(fpath)
+
+        fcontents = load(fpath)
+
+        match = self.ifndef_re.search(fcontents)
+        if not match:
+            print 'something drastically wrong with: %s' % fpath
+            return False  # failed
+        if match.lastindex is None:
+            # No ifndef. Request manual addition with hints
+            self.fail(fpath, match.re, match.string, '', '', False)
+            return False  # failed
+
+        # Does the guard end with a '_H'?
+        running_guard = match.group(1)
+        if not running_guard.endswith('_H'):
+            fcontents = self.fail(fpath, match.re, match.string,
+                                  match.group(1), valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is it the expected one based on the file path?
+        if running_guard != valid_guard:
+            fcontents = self.fail(fpath, match.re, match.string,
+                                  match.group(1), valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is there a #define? Is it the same as the #ifndef one?
+        match = self.define_re.search(fcontents)
+        if match.lastindex is None:
+            # No define. Request manual addition with hints
+            self.fail(fpath, match.re, match.string, '', '', False)
+            return False  # failed
+
+        # Is the #define guard the same as the #ifndef guard?
+        if match.group(1) != running_guard:
+            fcontents = self.fail(fpath, match.re, match.string,
+                                  match.group(1), valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is there a properly commented #endif?
+        endif_re = self.endif_cpp_re if cpp_header else self.endif_c_re
+        flines = fcontents.rstrip().splitlines()
+        match = endif_re.search('\n'.join(flines[-2:]))
+        if not match:
+            # No endif. Check if we have the last line as just '#endif' and if so
+            # replace it with a properly commented one.
+            if flines[-1] == '#endif':
+                flines[-1] = (
+                    '#endif' +
+                    ('  // {}\n'.format(valid_guard)
+                     if cpp_header else ' /* {} */\n'.format(valid_guard)))
+                if fix:
+                    fcontents = '\n'.join(flines)
+                    save(fpath, fcontents)
+            else:
+                # something else is wrong, bail out
+                self.fail(fpath, endif_re, flines[-1], '', '', False)
+        elif match.group(1) != running_guard:
+            # Is the #endif guard the same as the #ifndef and #define guards?
+            fcontents = self.fail(fpath, endif_re, fcontents,
+                                  match.group(1), valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        return not self.failed  # Did the check succeed? (ie, not failed)
+
 
 # find our home
-ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
 # parse command line
 argp = argparse.ArgumentParser(description='include guard checker')
-argp.add_argument('-f', '--fix',
-                  default=False,
-                  action='store_true');
-argp.add_argument('--precommit',
-                  default=False,
-                  action='store_true')
+argp.add_argument('-f', '--fix', default=False, action='store_true')
+argp.add_argument('--precommit', default=False, action='store_true')
 args = argp.parse_args()
 
 KNOWN_BAD = set([
@@ -161,12 +161,11 @@
     'include/grpc++/ext/reflection.pb.h',
 ])
 
-
 grep_filter = r"grep -E '^(include|src/core)/.*\.h$'"
 if args.precommit:
-  git_command = 'git diff --name-only HEAD'
+    git_command = 'git diff --name-only HEAD'
 else:
-  git_command = 'git ls-tree -r --name-only -r HEAD'
+    git_command = 'git ls-tree -r --name-only -r HEAD'
 
 FILE_LIST_COMMAND = ' | '.join((git_command, grep_filter))
 
@@ -174,17 +173,17 @@
 ok = True
 filename_list = []
 try:
-  filename_list = subprocess.check_output(FILE_LIST_COMMAND,
-                                          shell=True).splitlines()
-  # Filter out non-existent files (ie, file removed or renamed)
-  filename_list = (f for f in filename_list if os.path.isfile(f))
+    filename_list = subprocess.check_output(
+        FILE_LIST_COMMAND, shell=True).splitlines()
+    # Filter out non-existent files (ie, file removed or renamed)
+    filename_list = (f for f in filename_list if os.path.isfile(f))
 except subprocess.CalledProcessError:
-  sys.exit(0)
+    sys.exit(0)
 
 validator = GuardValidator()
 
 for filename in filename_list:
-  if filename in KNOWN_BAD: continue
-  ok = ok and validator.check(filename, args.fix)
+    if filename in KNOWN_BAD: continue
+    ok = ok and validator.check(filename, args.fix)
 
 sys.exit(0 if ok else 1)
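
Note: the guard checker derives the expected macro from the file path (the leading path component is dropped, '++' becomes 'XX', dots become underscores, and a GRPC_ prefix is added outside include/), then verifies the #ifndef/#define/#endif trio against it. A small sketch of that path-to-guard mapping:

import re

def expected_guard(fpath):
    """Map a header path to its expected include-guard macro."""
    prefix = 'GRPC_' if not fpath.startswith('include/') else ''
    parts = fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:]
    return prefix + '_'.join(parts)

def first_guard(fcontents):
    """Return the macro named by the first #ifndef, or None."""
    match = re.search(r'#ifndef ([A-Z][A-Z_1-9]*)', fcontents)
    return match.group(1) if match else None

print(expected_guard('include/grpc++/server.h'))      # GRPCXX_SERVER_H
print(expected_guard('src/core/lib/surface/call.h'))  # GRPC_CORE_LIB_SURFACE_CALL_H
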
diff --git a/tools/distrib/python/check_grpcio_tools.py b/tools/distrib/python/check_grpcio_tools.py
index b56ccae..2363017 100755
--- a/tools/distrib/python/check_grpcio_tools.py
+++ b/tools/distrib/python/check_grpcio_tools.py
@@ -23,12 +23,11 @@
 submodule_commit_hash = _make.protobuf_submodule_commit_hash()
 
 with open(_make.GRPC_PYTHON_PROTOC_LIB_DEPS, 'r') as _protoc_lib_deps_file:
-  content = _protoc_lib_deps_file.read().splitlines()
+    content = _protoc_lib_deps_file.read().splitlines()
 
-testString = (_make.COMMIT_HASH_PREFIX +
-              submodule_commit_hash +
-              _make.COMMIT_HASH_SUFFIX)
+testString = (
+    _make.COMMIT_HASH_PREFIX + submodule_commit_hash + _make.COMMIT_HASH_SUFFIX)
 
 if testString not in content:
-  print(OUT_OF_DATE_MESSAGE.format(_make.GRPC_PYTHON_PROTOC_LIB_DEPS))
-  raise SystemExit(1)
+    print(OUT_OF_DATE_MESSAGE.format(_make.GRPC_PYTHON_PROTOC_LIB_DEPS))
+    raise SystemExit(1)
diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py
index 1822e51..4d6fcb5 100755
--- a/tools/distrib/python/docgen.py
+++ b/tools/distrib/python/docgen.py
@@ -24,14 +24,20 @@
 import tempfile
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--config', metavar='c', type=str, nargs=1,
-                    help='GRPC/GPR libraries build configuration',
-                    default='opt')
+parser.add_argument(
+    '--config',
+    metavar='c',
+    type=str,
+    nargs=1,
+    help='GRPC/GPR libraries build configuration',
+    default='opt')
 parser.add_argument('--submit', action='store_true')
 parser.add_argument('--gh-user', type=str, help='GitHub user to push as.')
-parser.add_argument('--gh-repo-owner', type=str,
-                    help=('Owner of the GitHub repository to be pushed; '
-                          'defaults to --gh-user.'))
+parser.add_argument(
+    '--gh-repo-owner',
+    type=str,
+    help=('Owner of the GitHub repository to be pushed; '
+          'defaults to --gh-user.'))
 parser.add_argument('--doc-branch', type=str)
 args = parser.parse_args()
 
@@ -59,60 +65,75 @@
 })
 
 subprocess_arguments_list = [
-    {'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment},
-    {'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'],
-     'env': environment},
-    {'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
-     'env': environment},
-    {'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'build'], 'env': environment},
-    {'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'doc'], 'env': environment},
+    {
+        'args': ['virtualenv', VIRTUALENV_DIR],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'build'],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'doc'],
+        'env': environment
+    },
 ]
 
 for subprocess_arguments in subprocess_arguments_list:
-  print('Running command: {}'.format(subprocess_arguments['args']))
-  subprocess.check_call(**subprocess_arguments)
+    print('Running command: {}'.format(subprocess_arguments['args']))
+    subprocess.check_call(**subprocess_arguments)
 
 if args.submit:
-  assert args.gh_user
-  assert args.doc_branch
-  github_user = args.gh_user
-  github_repository_owner = (
-      args.gh_repo_owner if args.gh_repo_owner else args.gh_user)
-  # Create a temporary directory out of tree, checkout gh-pages from the
-  # specified repository, edit it, and push it. It's up to the user to then go
-  # onto GitHub and make a PR against grpc/grpc:gh-pages.
-  repo_parent_dir = tempfile.mkdtemp()
-  print('Documentation parent directory: {}'.format(repo_parent_dir))
-  repo_dir = os.path.join(repo_parent_dir, 'grpc')
-  python_doc_dir = os.path.join(repo_dir, 'python')
-  doc_branch = args.doc_branch
+    assert args.gh_user
+    assert args.doc_branch
+    github_user = args.gh_user
+    github_repository_owner = (args.gh_repo_owner
+                               if args.gh_repo_owner else args.gh_user)
+    # Create a temporary directory out of tree, checkout gh-pages from the
+    # specified repository, edit it, and push it. It's up to the user to then go
+    # onto GitHub and make a PR against grpc/grpc:gh-pages.
+    repo_parent_dir = tempfile.mkdtemp()
+    print('Documentation parent directory: {}'.format(repo_parent_dir))
+    repo_dir = os.path.join(repo_parent_dir, 'grpc')
+    python_doc_dir = os.path.join(repo_dir, 'python')
+    doc_branch = args.doc_branch
 
-  print('Cloning your repository...')
-  subprocess.check_call([
-          'git', 'clone', 'https://{}@github.com/{}/grpc'.format(
-              github_user, github_repository_owner)
-      ], cwd=repo_parent_dir)
-  subprocess.check_call([
-          'git', 'remote', 'add', 'upstream', 'https://github.com/grpc/grpc'
-      ], cwd=repo_dir)
-  subprocess.check_call(['git', 'fetch', 'upstream'], cwd=repo_dir)
-  subprocess.check_call([
-          'git', 'checkout', 'upstream/gh-pages', '-b', doc_branch
-      ], cwd=repo_dir)
-  print('Updating documentation...')
-  shutil.rmtree(python_doc_dir, ignore_errors=True)
-  shutil.copytree(DOC_PATH, python_doc_dir)
-  print('Attempting to push documentation...')
-  try:
-    subprocess.check_call(['git', 'add', '--all'], cwd=repo_dir)
-    subprocess.check_call([
-            'git', 'commit', '-m', 'Auto-update Python documentation'
-        ], cwd=repo_dir)
-    subprocess.check_call([
-            'git', 'push', '--set-upstream', 'origin', doc_branch
-        ], cwd=repo_dir)
-  except subprocess.CalledProcessError:
-    print('Failed to push documentation. Examine this directory and push '
-          'manually: {}'.format(repo_parent_dir))
-    sys.exit(1)
-  shutil.rmtree(repo_parent_dir)
+    print('Cloning your repository...')
+    subprocess.check_call(
+        [
+            'git', 'clone', 'https://{}@github.com/{}/grpc'.format(
+                github_user, github_repository_owner)
+        ],
+        cwd=repo_parent_dir)
+    subprocess.check_call(
+        ['git', 'remote', 'add', 'upstream', 'https://github.com/grpc/grpc'],
+        cwd=repo_dir)
+    subprocess.check_call(['git', 'fetch', 'upstream'], cwd=repo_dir)
+    subprocess.check_call(
+        ['git', 'checkout', 'upstream/gh-pages', '-b', doc_branch],
+        cwd=repo_dir)
+    print('Updating documentation...')
+    shutil.rmtree(python_doc_dir, ignore_errors=True)
+    shutil.copytree(DOC_PATH, python_doc_dir)
+    print('Attempting to push documentation...')
+    try:
+        subprocess.check_call(['git', 'add', '--all'], cwd=repo_dir)
+        subprocess.check_call(
+            ['git', 'commit', '-m', 'Auto-update Python documentation'],
+            cwd=repo_dir)
+        subprocess.check_call(
+            ['git', 'push', '--set-upstream', 'origin', doc_branch],
+            cwd=repo_dir)
+    except subprocess.CalledProcessError:
+        print('Failed to push documentation. Examine this directory and push '
+              'manually: {}'.format(repo_parent_dir))
+        sys.exit(1)
+    shutil.rmtree(repo_parent_dir)
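
Note: docgen.py drives the documentation build by running a fixed sequence of commands, each with the same augmented environment, and only afterwards (with --submit) clones the repository and pushes the generated docs to a gh-pages branch. A reduced sketch of the "list of subprocess kwargs" pattern it uses (the PYTHONPATH value and the commands are placeholders):

import os
import subprocess
import sys

environment = os.environ.copy()
environment['PYTHONPATH'] = '/tmp/example-project/src'  # placeholder path

subprocess_arguments_list = [
    {'args': [sys.executable, '--version'], 'env': environment},
    {'args': [sys.executable, '-c', 'print("building docs...")'],
     'env': environment},
]

for subprocess_arguments in subprocess_arguments_list:
    print('Running command: {}'.format(subprocess_arguments['args']))
    # Each entry is unpacked straight into check_call, so adding a build step
    # means appending one more kwargs dict to the list.
    subprocess.check_call(**subprocess_arguments)
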
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
index 1454b67..5772620 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/command.py b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
index 28be137..c0f9d93 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/command.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
@@ -22,43 +22,44 @@
 
 
 def build_package_protos(package_root):
-  proto_files = []
-  inclusion_root = os.path.abspath(package_root)
-  for root, _, files in os.walk(inclusion_root):
-    for filename in files:
-      if filename.endswith('.proto'):
-        proto_files.append(os.path.abspath(os.path.join(root, filename)))
+    proto_files = []
+    inclusion_root = os.path.abspath(package_root)
+    for root, _, files in os.walk(inclusion_root):
+        for filename in files:
+            if filename.endswith('.proto'):
+                proto_files.append(
+                    os.path.abspath(os.path.join(root, filename)))
 
-  well_known_protos_include = pkg_resources.resource_filename(
-      'grpc_tools', '_proto')
+    well_known_protos_include = pkg_resources.resource_filename('grpc_tools',
+                                                                '_proto')
 
-  for proto_file in proto_files:
-    command = [
-        'grpc_tools.protoc',
-        '--proto_path={}'.format(inclusion_root),
-        '--proto_path={}'.format(well_known_protos_include),
-        '--python_out={}'.format(inclusion_root),
-        '--grpc_python_out={}'.format(inclusion_root),
-    ] + [proto_file]
-    if protoc.main(command) != 0:
-      sys.stderr.write('warning: {} failed'.format(command))
+    for proto_file in proto_files:
+        command = [
+            'grpc_tools.protoc',
+            '--proto_path={}'.format(inclusion_root),
+            '--proto_path={}'.format(well_known_protos_include),
+            '--python_out={}'.format(inclusion_root),
+            '--grpc_python_out={}'.format(inclusion_root),
+        ] + [proto_file]
+        if protoc.main(command) != 0:
+            sys.stderr.write('warning: {} failed'.format(command))
 
 
 class BuildPackageProtos(setuptools.Command):
-  """Command to generate project *_pb2.py modules from proto files."""
+    """Command to generate project *_pb2.py modules from proto files."""
 
-  description = 'build grpc protobuf modules'
-  user_options = []
+    description = 'build grpc protobuf modules'
+    user_options = []
 
-  def initialize_options(self):
-    pass
+    def initialize_options(self):
+        pass
 
-  def finalize_options(self):
-    pass
+    def finalize_options(self):
+        pass
 
-  def run(self):
-    # due to limitations of the proto generator, we require that only *one*
-    # directory is provided as an 'include' directory. We assume it's the '' key
-    # to `self.distribution.package_dir` (and get a key error if it's not
-    # there).
-    build_package_protos(self.distribution.package_dir[''])
+    def run(self):
+        # due to limitations of the proto generator, we require that only *one*
+        # directory is provided as an 'include' directory. We assume it's the '' key
+        # to `self.distribution.package_dir` (and get a key error if it's not
+        # there).
+        build_package_protos(self.distribution.package_dir[''])
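
Note: build_package_protos collects every .proto under the package root and invokes the bundled compiler once per file, with the inclusion root and the well-known-types directory on the proto path. A hedged sketch of the same invocation through grpc_tools (assumes grpcio-tools is installed and that an example/hello.proto file exists):

import os
import pkg_resources
from grpc_tools import protoc

inclusion_root = os.path.abspath('example')                # assumed layout
proto_file = os.path.join(inclusion_root, 'hello.proto')   # assumed file
well_known_protos_include = pkg_resources.resource_filename('grpc_tools',
                                                            '_proto')

command = [
    'grpc_tools.protoc',
    '--proto_path={}'.format(inclusion_root),
    '--proto_path={}'.format(well_known_protos_include),
    '--python_out={}'.format(inclusion_root),
    '--grpc_python_out={}'.format(inclusion_root),
    proto_file,
]
if protoc.main(command) != 0:
    raise RuntimeError('protoc failed: {}'.format(command))
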
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
index efad51e..582cba0 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
@@ -19,16 +19,18 @@
 
 from grpc_tools import _protoc_compiler
 
+
 def main(command_arguments):
-  """Run the protocol buffer compiler with the given command-line arguments.
+    """Run the protocol buffer compiler with the given command-line arguments.
 
   Args:
     command_arguments: a list of strings representing command line arguments to
         `protoc`.
   """
-  command_arguments = [argument.encode() for argument in command_arguments]
-  return _protoc_compiler.run_main(command_arguments)
+    command_arguments = [argument.encode() for argument in command_arguments]
+    return _protoc_compiler.run_main(command_arguments)
+
 
 if __name__ == '__main__':
-  proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
-  sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
+    proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
+    sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py
index f613025..c4ed066 100644
--- a/tools/distrib/python/grpcio_tools/grpc_version.py
+++ b/tools/distrib/python/grpcio_tools/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
 
-VERSION='1.9.0.dev0'
+VERSION = '1.9.0.dev0'
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index 8d95cb5..342a220 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -66,42 +66,42 @@
 EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
 EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
 if EXTRA_ENV_COMPILE_ARGS is None:
-  EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
-  if 'win32' in sys.platform:
-    if sys.version_info < (3, 5):
-      # We use define flags here and don't directly add to DEFINE_MACROS below to
-      # ensure that the expert user/builder has a way of turning it off (via the
-      # envvars) without adding yet more GRPC-specific envvars.
-      # See https://sourceforge.net/p/mingw-w64/bugs/363/
-      if '32' in platform.architecture()[0]:
-        EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
-      else:
-        EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
-    else:
-      # We need to statically link the C++ Runtime, only the C runtime is
-      # available dynamically
-      EXTRA_ENV_COMPILE_ARGS += ' /MT'
-  elif "linux" in sys.platform or "darwin" in sys.platform:
-    EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
+    EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
+    if 'win32' in sys.platform:
+        if sys.version_info < (3, 5):
+            # We use define flags here and don't directly add to DEFINE_MACROS below to
+            # ensure that the expert user/builder has a way of turning it off (via the
+            # envvars) without adding yet more GRPC-specific envvars.
+            # See https://sourceforge.net/p/mingw-w64/bugs/363/
+            if '32' in platform.architecture()[0]:
+                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
+            else:
+                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
+        else:
+            # We need to statically link the C++ Runtime, only the C runtime is
+            # available dynamically
+            EXTRA_ENV_COMPILE_ARGS += ' /MT'
+    elif "linux" in sys.platform or "darwin" in sys.platform:
+        EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
 if EXTRA_ENV_LINK_ARGS is None:
-  EXTRA_ENV_LINK_ARGS = ''
-  if "linux" in sys.platform or "darwin" in sys.platform:
-    EXTRA_ENV_LINK_ARGS += ' -lpthread'
-  elif "win32" in sys.platform and sys.version_info < (3, 5):
-    msvcr = cygwinccompiler.get_msvcr()[0]
-    # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
-    # influence on the linkage outcome on MinGW for non-C++ programs.
-    EXTRA_ENV_LINK_ARGS += (
-        ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
-        '-static'.format(msvcr=msvcr))
+    EXTRA_ENV_LINK_ARGS = ''
+    if "linux" in sys.platform or "darwin" in sys.platform:
+        EXTRA_ENV_LINK_ARGS += ' -lpthread'
+    elif "win32" in sys.platform and sys.version_info < (3, 5):
+        msvcr = cygwinccompiler.get_msvcr()[0]
+        # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
+        # influence on the linkage outcome on MinGW for non-C++ programs.
+        EXTRA_ENV_LINK_ARGS += (
+            ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
+            '-static'.format(msvcr=msvcr))
 
 EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
 EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
 
-CC_FILES = [
-  os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
+CC_FILES = [os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
 PROTO_FILES = [
-  os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES]
+    os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES
+]
 CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE)
 PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)
 
@@ -110,107 +110,114 @@
 
 DEFINE_MACROS = ()
 if "win32" in sys.platform:
-  DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
-  if '64bit' in platform.architecture()[0]:
-    DEFINE_MACROS += (('MS_WIN64', 1),)
+    DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
+    if '64bit' in platform.architecture()[0]:
+        DEFINE_MACROS += (('MS_WIN64', 1),)
 elif "linux" in sys.platform or "darwin" in sys.platform:
-  DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
+    DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
 
 # By default, Python3 distutils enforces compatibility of
 # c plugins (.so files) with the OSX version Python3 was built with.
 # For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
 if 'darwin' in sys.platform and PY3:
-  mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
-  if mac_target and (pkg_resources.parse_version(mac_target) <
-		     pkg_resources.parse_version('10.9.0')):
-    os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
-    os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
-        r'macosx-[0-9]+\.[0-9]+-(.+)',
-        r'macosx-10.9-\1',
-        util.get_platform())
+    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+    if mac_target and (pkg_resources.parse_version(mac_target) <
+                       pkg_resources.parse_version('10.9.0')):
+        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
+        os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
+            r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.9-\1',
+            util.get_platform())
+
 
 def package_data():
-  tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
-  proto_resources_path = os.path.join(tools_path,
-                                      GRPC_PYTHON_PROTO_RESOURCES_NAME)
-  proto_files = []
-  for proto_file in PROTO_FILES:
-    source = os.path.join(PROTO_INCLUDE, proto_file)
-    target = os.path.join(proto_resources_path, proto_file)
-    relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME, proto_file)
-    try:
-      os.makedirs(os.path.dirname(target))
-    except OSError as error:
-      if error.errno == errno.EEXIST:
-        pass
-      else:
-        raise
-    shutil.copy(source, target)
-    proto_files.append(relative_target)
-  return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
+    tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
+    proto_resources_path = os.path.join(tools_path,
+                                        GRPC_PYTHON_PROTO_RESOURCES_NAME)
+    proto_files = []
+    for proto_file in PROTO_FILES:
+        source = os.path.join(PROTO_INCLUDE, proto_file)
+        target = os.path.join(proto_resources_path, proto_file)
+        relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME,
+                                       proto_file)
+        try:
+            os.makedirs(os.path.dirname(target))
+        except OSError as error:
+            if error.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+        shutil.copy(source, target)
+        proto_files.append(relative_target)
+    return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
+
 
 def extension_modules():
-  if BUILD_WITH_CYTHON:
-    plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
-  else:
-    plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
+    if BUILD_WITH_CYTHON:
+        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
+    else:
+        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
 
-  plugin_sources += [
-    os.path.join('grpc_tools', 'main.cc'),
-    os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')]
+    plugin_sources += [
+        os.path.join('grpc_tools', 'main.cc'),
+        os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')
+    ]
 
-  #HACK: Substitute the embed.cc, which is a JS to C++
-  #      preprocessor with the generated code.
-  #      The generated code should not be material
-  #      to the parts of protoc we use (it affects
-  #      the JavaScript code generator, supposedly),
-  #      but we need to be cautious about it.
-  cc_files_clone = list(CC_FILES)
-  embed_cc_file = os.path.normpath('google/protobuf/compiler/js/embed.cc')
-  well_known_types_file = os.path.normpath(
-      'google/protobuf/compiler/js/well_known_types_embed.cc')
-  if embed_cc_file in cc_files_clone:
-    cc_files_clone.remove(embed_cc_file)
-  if well_known_types_file in cc_files_clone:
-    cc_files_clone.remove(well_known_types_file)
-    plugin_sources += [os.path.join('grpc_tools', 'protobuf_generated_well_known_types_embed.cc')]
-  plugin_sources += [os.path.join(CC_INCLUDE, cc_file) for cc_file in cc_files_clone]
+    #HACK: Substitute the embed.cc, which is a JS to C++
+    #      preprocessor with the generated code.
+    #      The generated code should not be material
+    #      to the parts of protoc we use (it affects
+    #      the JavaScript code generator, supposedly),
+    #      but we need to be cautious about it.
+    cc_files_clone = list(CC_FILES)
+    embed_cc_file = os.path.normpath('google/protobuf/compiler/js/embed.cc')
+    well_known_types_file = os.path.normpath(
+        'google/protobuf/compiler/js/well_known_types_embed.cc')
+    if embed_cc_file in cc_files_clone:
+        cc_files_clone.remove(embed_cc_file)
+    if well_known_types_file in cc_files_clone:
+        cc_files_clone.remove(well_known_types_file)
+        plugin_sources += [
+            os.path.join('grpc_tools',
+                         'protobuf_generated_well_known_types_embed.cc')
+        ]
+    plugin_sources += [
+        os.path.join(CC_INCLUDE, cc_file) for cc_file in cc_files_clone
+    ]
 
-  plugin_ext = extension.Extension(
-      name='grpc_tools._protoc_compiler',
-      sources=plugin_sources,
-      include_dirs=[
-          '.',
-          'grpc_root',
-          os.path.join('grpc_root', 'include'),
-          CC_INCLUDE,
-      ],
-      language='c++',
-      define_macros=list(DEFINE_MACROS),
-      extra_compile_args=list(EXTRA_COMPILE_ARGS),
-      extra_link_args=list(EXTRA_LINK_ARGS),
-  )
-  extensions = [plugin_ext]
-  if BUILD_WITH_CYTHON:
-    from Cython import Build
-    return Build.cythonize(extensions)
-  else:
-    return extensions
+    plugin_ext = extension.Extension(
+        name='grpc_tools._protoc_compiler',
+        sources=plugin_sources,
+        include_dirs=[
+            '.',
+            'grpc_root',
+            os.path.join('grpc_root', 'include'),
+            CC_INCLUDE,
+        ],
+        language='c++',
+        define_macros=list(DEFINE_MACROS),
+        extra_compile_args=list(EXTRA_COMPILE_ARGS),
+        extra_link_args=list(EXTRA_LINK_ARGS),)
+    extensions = [plugin_ext]
+    if BUILD_WITH_CYTHON:
+        from Cython import Build
+        return Build.cythonize(extensions)
+    else:
+        return extensions
+
 
 setuptools.setup(
-  name='grpcio-tools',
-  version=grpc_version.VERSION,
-  description='Protobuf code generator for gRPC',
-  author='The gRPC Authors',
-  author_email='grpc-io@googlegroups.com',
-  url='https://grpc.io',
-  license='Apache License 2.0',
-  classifiers=CLASSIFIERS,
-  ext_modules=extension_modules(),
-  packages=setuptools.find_packages('.'),
-  install_requires=[
-    'protobuf>=3.5.0.post1',
-    'grpcio>={version}'.format(version=grpc_version.VERSION),
-  ],
-  package_data=package_data(),
-)
+    name='grpcio-tools',
+    version=grpc_version.VERSION,
+    description='Protobuf code generator for gRPC',
+    author='The gRPC Authors',
+    author_email='grpc-io@googlegroups.com',
+    url='https://grpc.io',
+    license='Apache License 2.0',
+    classifiers=CLASSIFIERS,
+    ext_modules=extension_modules(),
+    packages=setuptools.find_packages('.'),
+    install_requires=[
+        'protobuf>=3.5.0.post1',
+        'grpcio>={version}'.format(version=grpc_version.VERSION),
+    ],
+    package_data=package_data(),)
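
Note: this setup.py seeds its compile and link flags from GRPC_PYTHON_CFLAGS / GRPC_PYTHON_LDFLAGS when they are set, otherwise derives platform defaults, and finally splits the strings with shlex. A trimmed sketch of that override-or-default pattern (the Windows branches from the original are omitted here):

import os
import shlex
import sys

extra_compile = os.environ.get('GRPC_PYTHON_CFLAGS', None)
if extra_compile is None:
    extra_compile = '-std=c++11'
    if 'linux' in sys.platform or 'darwin' in sys.platform:
        extra_compile += ' -fno-wrapv -frtti'

# shlex.split turns the single flag string into the argument list distutils expects.
EXTRA_COMPILE_ARGS = shlex.split(extra_compile)
print(EXTRA_COMPILE_ARGS)  # e.g. ['-std=c++11', '-fno-wrapv', '-frtti'] on Linux
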
diff --git a/tools/distrib/python/make_grpcio_tools.py b/tools/distrib/python/make_grpcio_tools.py
index c865f0b..216492a 100755
--- a/tools/distrib/python/make_grpcio_tools.py
+++ b/tools/distrib/python/make_grpcio_tools.py
@@ -27,7 +27,7 @@
 import traceback
 import uuid
 
-DEPS_FILE_CONTENT="""
+DEPS_FILE_CONTENT = """
 # Copyright 2017 gRPC authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -60,15 +60,16 @@
 PROTOBUF_PROTO_PREFIX = '//:src/'
 
 GRPC_ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                 '..', '..', '..'))
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
 
-GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools', 'distrib',
-                                'python', 'grpcio_tools')
+GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
+                                'grpcio_tools')
 
-GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = os.path.join('third_party', 'protobuf', 'src')
+GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = os.path.join('third_party', 'protobuf',
+                                                  'src')
 GRPC_PROTOBUF = os.path.join(GRPC_ROOT, GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT)
-GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(GRPC_ROOT, 'third_party', 'protobuf')
+GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(GRPC_ROOT, 'third_party',
+                                            'protobuf')
 GRPC_PROTOC_PLUGINS = os.path.join(GRPC_ROOT, 'src', 'compiler')
 GRPC_PYTHON_PROTOBUF = os.path.join(GRPC_PYTHON_ROOT, 'third_party', 'protobuf',
                                     'src')
@@ -80,81 +81,91 @@
 GRPC_INCLUDE = os.path.join(GRPC_ROOT, 'include')
 GRPC_PYTHON_INCLUDE = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root', 'include')
 
-BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python', 'bazel_deps.sh')
+BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
+                          'bazel_deps.sh')
 BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib'
 BAZEL_DEPS_COMMON_PROTOS_QUERY = '//:well_known_protos'
 
+
 def protobuf_submodule_commit_hash():
-  """Gets the commit hash for the HEAD of the protobuf submodule currently
+    """Gets the commit hash for the HEAD of the protobuf submodule currently
      checked out."""
-  cwd = os.getcwd()
-  os.chdir(GRPC_PROTOBUF_SUBMODULE_ROOT)
-  output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
-  os.chdir(cwd)
-  return output.splitlines()[0].strip()
+    cwd = os.getcwd()
+    os.chdir(GRPC_PROTOBUF_SUBMODULE_ROOT)
+    output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
+    os.chdir(cwd)
+    return output.splitlines()[0].strip()
+
 
 def bazel_query(query):
-  output = subprocess.check_output([BAZEL_DEPS, query])
-  return output.splitlines()
+    output = subprocess.check_output([BAZEL_DEPS, query])
+    return output.splitlines()
+
 
 def get_deps():
-  """Write the result of the bazel query `query` against protobuf to
+    """Write the result of the bazel query `query` against protobuf to
      `out_file`."""
-  cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
-  cc_files = [
-      name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output
-      if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)]
-  proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
-  proto_files = [
-      name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output
-      if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)]
-  commit_hash = protobuf_submodule_commit_hash()
-  deps_file_content = DEPS_FILE_CONTENT.format(
-      cc_files=cc_files,
-      proto_files=proto_files,
-      cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
-      proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
-      commit_hash=COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX)
-  return deps_file_content
+    cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
+    cc_files = [
+        name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output
+        if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)
+    ]
+    proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
+    proto_files = [
+        name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output
+        if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)
+    ]
+    commit_hash = protobuf_submodule_commit_hash()
+    deps_file_content = DEPS_FILE_CONTENT.format(
+        cc_files=cc_files,
+        proto_files=proto_files,
+        cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
+        proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
+        commit_hash=COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX)
+    return deps_file_content
+
 
 def long_path(path):
-  if os.name == 'nt':
-    return '\\\\?\\' + path
-  else:
-    return path
+    if os.name == 'nt':
+        return '\\\\?\\' + path
+    else:
+        return path
+
 
 def main():
-  os.chdir(GRPC_ROOT)
+    os.chdir(GRPC_ROOT)
 
-  for source, target in [
-      (GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF),
-      (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS),
-      (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]:
-    for source_dir, _, files in os.walk(source):
-      target_dir = os.path.abspath(os.path.join(target, os.path.relpath(source_dir, source)))
-      try:
-        os.makedirs(target_dir)
-      except OSError as error:
-        if error.errno != errno.EEXIST:
-          raise
-      for relative_file in files:
-        source_file = os.path.abspath(os.path.join(source_dir, relative_file))
-        target_file = os.path.abspath(os.path.join(target_dir, relative_file))
-        shutil.copyfile(source_file, target_file)
+    for source, target in [(GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF),
+                           (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS),
+                           (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]:
+        for source_dir, _, files in os.walk(source):
+            target_dir = os.path.abspath(
+                os.path.join(target, os.path.relpath(source_dir, source)))
+            try:
+                os.makedirs(target_dir)
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise
+            for relative_file in files:
+                source_file = os.path.abspath(
+                    os.path.join(source_dir, relative_file))
+                target_file = os.path.abspath(
+                    os.path.join(target_dir, relative_file))
+                shutil.copyfile(source_file, target_file)
 
-  try:
-    protoc_lib_deps_content = get_deps()
-  except Exception as error:
-    # We allow this script to succeed even if we couldn't get the dependencies,
-    # as then we can assume that even without a successful bazel run the
-    # dependencies currently in source control are 'good enough'.
-    sys.stderr.write("Got non-fatal error:\n")
-    traceback.print_exc(file=sys.stderr)
-    return
-  # If we successfully got the dependencies, truncate and rewrite the deps file.
-  with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
-    deps_file.write(protoc_lib_deps_content)
+    try:
+        protoc_lib_deps_content = get_deps()
+    except Exception as error:
+        # We allow this script to succeed even if we couldn't get the dependencies,
+        # as then we can assume that even without a successful bazel run the
+        # dependencies currently in source control are 'good enough'.
+        sys.stderr.write("Got non-fatal error:\n")
+        traceback.print_exc(file=sys.stderr)
+        return
+    # If we successfully got the dependencies, truncate and rewrite the deps file.
+    with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
+        deps_file.write(protoc_lib_deps_content)
+
 
 if __name__ == '__main__':
-  main()
-
+    main()
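
Note: get_deps() shells out to bazel (via bazel_deps.sh), then keeps only the query results that carry the protobuf label prefix and the expected suffix, stripping the prefix to obtain repo-relative paths. A small sketch of that filtering step over a canned query result (the label prefix mirrors the script; the sample output lines are made up):

PROTOBUF_CC_PREFIX = '//:src/'

def filter_cc_files(query_output_lines):
    """Keep //:src/... .cc labels and strip the label prefix."""
    return [
        name[len(PROTOBUF_CC_PREFIX):] for name in query_output_lines
        if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)
    ]

sample_output = [
    '//:src/google/protobuf/descriptor.cc',
    '//:src/google/protobuf/descriptor.h',  # dropped: not a .cc file
    '//external:zlib',                       # dropped: wrong prefix
]
print(filter_cc_files(sample_output))
# ['google/protobuf/descriptor.cc']
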
diff --git a/tools/distrib/python/submit.py b/tools/distrib/python/submit.py
index 92eab5a..aff71b5 100755
--- a/tools/distrib/python/submit.py
+++ b/tools/distrib/python/submit.py
@@ -21,43 +21,52 @@
 parser = argparse.ArgumentParser(
     description='Submit the package to a PyPI repository.')
 parser.add_argument(
-    '--repository', '-r', metavar='r', type=str, default='pypi',
+    '--repository',
+    '-r',
+    metavar='r',
+    type=str,
+    default='pypi',
     help='The repository to push the package to. '
-         'Ensure the value appears in your .pypirc file. '
-         'Defaults to "pypi".'
-)
+    'Ensure the value appears in your .pypirc file. '
+    'Defaults to "pypi".')
 parser.add_argument(
-    '--identity', '-i', metavar='i', type=str,
-    help='GPG identity to sign the files with.'
-)
+    '--identity',
+    '-i',
+    metavar='i',
+    type=str,
+    help='GPG identity to sign the files with.')
 parser.add_argument(
-    '--username', '-u', metavar='u', type=str,
+    '--username',
+    '-u',
+    metavar='u',
+    type=str,
     help='Username to authenticate with the repository. Not needed if you have '
-         'configured your .pypirc to include your username.'
-)
+    'configured your .pypirc to include your username.')
 parser.add_argument(
-    '--password', '-p', metavar='p', type=str,
+    '--password',
+    '-p',
+    metavar='p',
+    type=str,
     help='Password to authenticate with the repository. Not needed if you have '
-         'configured your .pypirc to include your password.'
-)
+    'configured your .pypirc to include your password.')
 parser.add_argument(
-    '--bdist', '-b', action='store_true',
-    help='Generate a binary distribution (wheel) for the current OS.'
-)
+    '--bdist',
+    '-b',
+    action='store_true',
+    help='Generate a binary distribution (wheel) for the current OS.')
 parser.add_argument(
-    '--dist-args', type=str,
-    help='Additional arguments to pass to the *dist setup.py command.'
-)
+    '--dist-args',
+    type=str,
+    help='Additional arguments to pass to the *dist setup.py command.')
 args = parser.parse_args()
 
 # Move to the root directory of Python GRPC.
-pkgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                      '../../../')
+pkgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')
 # Remove previous distributions; they somehow confuse twine.
 try:
-  shutil.rmtree(os.path.join(pkgdir, 'dist/'))
+    shutil.rmtree(os.path.join(pkgdir, 'dist/'))
 except:
-  pass
+    pass
 
 # Build the Cython C files
 build_env = os.environ.copy()
@@ -67,20 +76,20 @@
 
 # Make the push.
 if args.bdist:
-  cmd = ['python', 'setup.py', 'bdist_wheel']
+    cmd = ['python', 'setup.py', 'bdist_wheel']
 else:
-  cmd = ['python', 'setup.py', 'sdist']
+    cmd = ['python', 'setup.py', 'sdist']
 if args.dist_args:
-  cmd += args.dist_args.split()
+    cmd += args.dist_args.split()
 subprocess.call(cmd, cwd=pkgdir)
 
 cmd = ['twine', 'upload', '-r', args.repository]
 if args.identity is not None:
-  cmd.extend(['-i', args.identity])
+    cmd.extend(['-i', args.identity])
 if args.username is not None:
-  cmd.extend(['-u', args.username])
+    cmd.extend(['-u', args.username])
 if args.password is not None:
-  cmd.extend(['-p', args.password])
+    cmd.extend(['-p', args.password])
 cmd.append('dist/*')
 
 subprocess.call(cmd, cwd=pkgdir)
diff --git a/tools/distrib/run_clang_tidy.py b/tools/distrib/run_clang_tidy.py
index d002a04..3ac712e 100755
--- a/tools/distrib/run_clang_tidy.py
+++ b/tools/distrib/run_clang_tidy.py
@@ -20,51 +20,51 @@
 import multiprocessing
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
 import jobset
 
 GRPC_CHECKS = [
-  'modernize-use-nullptr',
+    'modernize-use-nullptr',
 ]
 
 extra_args = [
-  '-x',
-  'c++',
-  '-std=c++11',
+    '-x',
+    'c++',
+    '-std=c++11',
 ]
 with open('.clang_complete') as f:
-  for line in f:
-    line = line.strip()
-    if line.startswith('-I'):
-      extra_args.append(line)
+    for line in f:
+        line = line.strip()
+        if line.startswith('-I'):
+            extra_args.append(line)
 
 clang_tidy = os.environ.get('CLANG_TIDY', 'clang-tidy')
 
 argp = argparse.ArgumentParser(description='Run clang-tidy against core')
 argp.add_argument('files', nargs='+', help='Files to tidy')
 argp.add_argument('--fix', dest='fix', action='store_true')
-argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count(),
-                  help='Number of CPUs to use')
+argp.add_argument(
+    '-j',
+    '--jobs',
+    type=int,
+    default=multiprocessing.cpu_count(),
+    help='Number of CPUs to use')
 argp.set_defaults(fix=False)
 args = argp.parse_args()
 
 cmdline = [
-    clang_tidy,
-    '--checks=-*,%s' % ','.join(GRPC_CHECKS),
+    clang_tidy, '--checks=-*,%s' % ','.join(GRPC_CHECKS),
     '--warnings-as-errors=%s' % ','.join(GRPC_CHECKS)
-] + [
-    '--extra-arg-before=%s' % arg
-    for arg in extra_args
-]
+] + ['--extra-arg-before=%s' % arg for arg in extra_args]
 
 if args.fix:
-  cmdline.append('--fix')
+    cmdline.append('--fix')
 
 jobs = []
 for filename in args.files:
-  jobs.append(jobset.JobSpec(cmdline + [filename],
-                             shortname=filename,
-                             ))#verbose_success=True))
+    jobs.append(jobset.JobSpec(
+        cmdline + [filename],
+        shortname=filename,))  #verbose_success=True))
 
 jobset.run(jobs, maxjobs=args.jobs)
diff --git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh
index 51d1799..fb14f36 100755
--- a/tools/distrib/yapf_code.sh
+++ b/tools/distrib/yapf_code.sh
@@ -19,9 +19,13 @@
 cd "$(dirname "${0}")/../.."
 
 DIRS=(
+    'examples/python'
     'src/python'
+    'tools'
 )
 EXCLUSIONS=(
+    '*protoc_lib_deps.py'  # this file is auto-generated
+    '*_pb2*.py'  # no need to format protoc generated files
 )
 
 VIRTUALENV=yapf_virtual_environment
@@ -50,7 +54,7 @@
 	tempdir=$(mktemp -d)
 	cp -RT "${dir}" "${tempdir}"
 	yapf "${tempdir}"
-	diff -ru "${dir}" "${tempdir}" || ok=no
+	diff -x '*.pyc' -ru "${dir}" "${tempdir}" || ok=no
 	rm -rf "${tempdir}"
     done
     if [[ ${ok} == no ]]; then
diff --git a/tools/failures/detect_new_failures.py b/tools/failures/detect_new_failures.py
new file mode 100644
index 0000000..87fd1d9
--- /dev/null
+++ b/tools/failures/detect_new_failures.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Detect new flakes and create issues for them"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import datetime
+import json
+import logging
+import os
+import pprint
+import sys
+import urllib
+import urllib2
+from collections import namedtuple
+
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+
+import big_query_utils
+
+GH_ISSUE_CREATION_URL = 'https://api.github.com/repos/grpc/grpc/issues'
+GH_ISSUE_SEARCH_URL = 'https://api.github.com/search/issues'
+KOKORO_BASE_URL = 'https://kokoro2.corp.google.com/job/'
+
+
+def gh(url, data=None):
+    request = urllib2.Request(url, data=data)
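+    # TOKEN is a module-level global, set from the --token flag in main().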
+    assert TOKEN
+    request.add_header('Authorization', 'token {}'.format(TOKEN))
+    if data:
+        request.add_header('Content-type', 'application/json')
+    response = urllib2.urlopen(request)
+    if 200 <= response.getcode() < 300:
+        return json.loads(response.read())
+    else:
+        raise ValueError('Error ({}) accessing {}'.format(response.getcode(),
+                                                          response.geturl()))
+
+
+def search_gh_issues(search_term, status='open'):
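+    # Search open issues in grpc/grpc that mention search_term via the GitHub
+    # search API (the status parameter is currently unused; the query is
+    # hard-coded to open issues).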
+    params = ' '.join((search_term, 'is:issue', 'is:open', 'repo:grpc/grpc'))
+    qargs = urllib.urlencode({'q': params})
+    url = '?'.join((GH_ISSUE_SEARCH_URL, qargs))
+    response = gh(url)
+    return response
+
+
+def create_gh_issue(title, body, labels, assignees=[]):
+    params = {'title': title, 'body': body, 'labels': labels}
+    if assignees:
+        params['assignees'] = assignees
+    data = json.dumps(params)
+    response = gh(GH_ISSUE_CREATION_URL, data)
+    issue_url = response['html_url']
+    print('Created issue {} for {}'.format(issue_url, title))
+
+
+def build_kokoro_url(job_name, build_id):
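+    # e.g. job_name 'foo/bar' and build_id 42 -> <KOKORO_BASE_URL>foo/job/bar/42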
+    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
+    return KOKORO_BASE_URL + job_path
+
+
+def create_issues(new_flakes, always_create):
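+    # Unless always_create is set, list any pre-existing GitHub issues for the
+    # test and prompt interactively before creating a new one.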
+    for test_name, results_row in new_flakes.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        # TODO(dgq): the Kokoro URL has a limited lifetime. The permanent and ideal
+        # URL would be the sponge one, but there's currently no easy way to retrieve
+        # it.
+        url = build_kokoro_url(job_name, build_id)
+        title = 'New Failure: ' + test_name
+        body = '- Test: {}\n- Poll Strategy: {}\n- URL: {}'.format(
+            test_name, poll_strategy, url)
+        labels = ['infra/New Failure']
+        if always_create:
+            proceed = True
+        else:
+            preexisting_issues = search_gh_issues(test_name)
+            if preexisting_issues['total_count'] > 0:
+                print('\nFound {} issues for "{}":'.format(preexisting_issues[
+                    'total_count'], test_name))
+                for issue in preexisting_issues['items']:
+                    print('\t"{}" ; URL: {}'.format(issue['title'], issue[
+                        'html_url']))
+            else:
+                print(
+                    '\nNo preexisting issues found for "{}"'.format(test_name))
+            proceed = raw_input(
+                'Create issue for:\nTitle: {}\nBody: {}\n[Y/n] '.format(
+                    title, body)) in ('y', 'Y', '')
+        if proceed:
+            assignees_str = raw_input(
+                'Assignees? (comma-separated, leave blank for unassigned): ')
+            assignees = [
+                assignee.strip() for assignee in assignees_str.split(',')
+                if assignee.strip()
+            ]
+            create_gh_issue(title, body, labels, assignees)
+
+
+def print_table(table, format):
+    first_time = True
+    for test_name, results_row in table.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        full_kokoro_url = build_kokoro_url(job_name, build_id)
+        if format == 'human':
+            print("\t- Test: {}, Polling: {}, Timestamp: {}, url: {}".format(
+                test_name, poll_strategy, timestamp, full_kokoro_url))
+        else:
+            assert (format == 'csv')
+            if first_time:
+                print('test,timestamp,url')
+                first_time = False
+            print("{},{},{}".format(test_name, timestamp, full_kokoro_url))
+
+
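+# One Row per failing test; get_new_failures() returns a dict keyed by test name.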
+Row = namedtuple('Row', ['poll_strategy', 'job_name', 'build_id', 'timestamp'])
+
+
+def get_new_failures(dates):
+    bq = big_query_utils.create_big_query()
+    this_script_path = os.path.join(os.path.dirname(__file__))
+    sql_script = os.path.join(this_script_path, 'sql/new_failures_24h.sql')
+    with open(sql_script) as query_file:
+        query = query_file.read().format(
+            calibration_begin=dates['calibration']['begin'],
+            calibration_end=dates['calibration']['end'],
+            reporting_begin=dates['reporting']['begin'],
+            reporting_end=dates['reporting']['end'])
+    logging.debug("Query:\n%s", query)
+    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    rows = page.get('rows')
+    if rows:
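+        # Each BigQuery row is {'f': [{'v': value}, ...]}, with values in the
+        # SELECT column order of the query: test_binary, poll_strategy,
+        # job_name, build_id, timestamp.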
+        return {
+            row['f'][0]['v']: Row(poll_strategy=row['f'][1]['v'],
+                                  job_name=row['f'][2]['v'],
+                                  build_id=row['f'][3]['v'],
+                                  timestamp=row['f'][4]['v'])
+            for row in rows
+        }
+    else:
+        return {}
+
+
+def parse_isodate(date_str):
+    return datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
+
+
+def get_new_flakes(args):
+    """The from_date_str argument marks the beginning of the "calibration", used
+  to establish the set of pre-existing flakes, which extends over
+  "calibration_days".  After the calibration period, "reporting_days" is the
+  length of time during which new flakes will be reported.
+
+from
+date
+  |--------------------|---------------|
+  ^____________________^_______________^
+       calibration         reporting
+         days                days
+  """
+    dates = process_date_args(args)
+    new_failures = get_new_failures(dates)
+    logging.info('|new failures| = %d', len(new_failures))
+    return new_failures
+
+
+def build_args_parser():
+    import argparse, datetime
+    parser = argparse.ArgumentParser()
+    today = datetime.date.today()
+    a_week_ago = today - datetime.timedelta(days=7)
+    parser.add_argument(
+        '--calibration_days',
+        type=int,
+        default=7,
+        help='How many days to consider for pre-existing flakes.')
+    parser.add_argument(
+        '--reporting_days',
+        type=int,
+        default=1,
+        help='How many days to consider for the detection of new flakes.')
+    parser.add_argument(
+        '--count_only',
+        dest='count_only',
+        action='store_true',
+        help='Display only number of new flakes.')
+    parser.set_defaults(count_only=False)
+    parser.add_argument(
+        '--create_issues',
+        dest='create_issues',
+        action='store_true',
+        help='Create issues for all new flakes.')
+    parser.set_defaults(create_issues=False)
+    parser.add_argument(
+        '--always_create_issues',
+        dest='always_create_issues',
+        action='store_true',
+        help='Always create issues for all new flakes. Otherwise,'
+        ' interactively prompt for every issue.')
+    parser.set_defaults(always_create_issues=False)
+    parser.add_argument(
+        '--token',
+        type=str,
+        default='',
+        help='GitHub token, used to access the GitHub API with a higher rate limit')
+    parser.add_argument(
+        '--format',
+        type=str,
+        choices=['human', 'csv'],
+        default='human',
+        help='Output format: are you a human or a machine?')
+    parser.add_argument(
+        '--loglevel',
+        type=str,
+        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'],
+        default='WARNING',
+        help='Logging level.')
+    return parser
+
+
+def process_date_args(args):
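+    # The calibration window immediately precedes the reporting window, and the
+    # reporting window ends today.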
+    calibration_begin = (
+        datetime.date.today() - datetime.timedelta(days=args.calibration_days) -
+        datetime.timedelta(days=args.reporting_days))
+    calibration_end = calibration_begin + datetime.timedelta(
+        days=args.calibration_days)
+    reporting_begin = calibration_end
+    reporting_end = reporting_begin + datetime.timedelta(
+        days=args.reporting_days)
+    return {
+        'calibration': {
+            'begin': calibration_begin,
+            'end': calibration_end
+        },
+        'reporting': {
+            'begin': reporting_begin,
+            'end': reporting_end
+        }
+    }
+
+
+def main():
+    global TOKEN
+    args_parser = build_args_parser()
+    args = args_parser.parse_args()
+    if args.create_issues and not args.token:
+        raise ValueError(
+            'Missing --token argument, needed to create GitHub issues')
+    TOKEN = args.token
+
+    logging_level = getattr(logging, args.loglevel)
+    logging.basicConfig(format='%(asctime)s %(message)s', level=logging_level)
+    new_flakes = get_new_flakes(args)
+
+    dates = process_date_args(args)
+
+    dates_info_string = 'from {} until {} (calibrated from {} until {})'.format(
+        dates['reporting']['begin'].isoformat(),
+        dates['reporting']['end'].isoformat(),
+        dates['calibration']['begin'].isoformat(),
+        dates['calibration']['end'].isoformat())
+
+    if args.format == 'human':
+        if args.count_only:
+            print(len(new_flakes), dates_info_string)
+        elif new_flakes:
+            found_msg = 'Found {} new flakes {}'.format(
+                len(new_flakes), dates_info_string)
+            print(found_msg)
+            print('*' * len(found_msg))
+            print_table(new_flakes, 'human')
+            if args.create_issues:
+                create_issues(new_flakes, args.always_create_issues)
+        else:
+            print('No new flakes found', dates_info_string)
+    elif args.format == 'csv':
+        if args.count_only:
+            print('from_date,to_date,count')
+            print('{},{},{}'.format(dates['reporting']['begin'].isoformat(
+            ), dates['reporting']['end'].isoformat(), len(new_flakes)))
+        else:
+            print_table(new_flakes, 'csv')
+    else:
+        raise ValueError(
+            'Invalid argument for --format: {}'.format(args.format))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/failures/sql/new_failures_24h.sql b/tools/failures/sql/new_failures_24h.sql
new file mode 100644
index 0000000..6ce0c5d
--- /dev/null
+++ b/tools/failures/sql/new_failures_24h.sql
@@ -0,0 +1,62 @@
+#standardSQL
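+-- Failures seen in the reporting window whose test binary has no failure
+-- recorded during the calibration window.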
+WITH calibration AS (
+  SELECT
+    RTRIM(LTRIM(REGEXP_REPLACE(filtered_test_name, r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', ''))) AS test_binary,
+    REGEXP_EXTRACT(test_name, r'GRPC_POLL_STRATEGY=(\w+)') AS poll_strategy,
+    job_name,
+    build_id
+  FROM (
+    SELECT
+      REGEXP_REPLACE(test_name, r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '') AS filtered_test_name,
+      test_name,
+      job_name,
+      build_id,
+      timestamp
+    FROM
+      `grpc-testing.jenkins_test_results.aggregate_results`
+    WHERE
+      timestamp > TIMESTAMP(DATETIME("{calibration_begin} 00:00:00", "America/Los_Angeles"))
+      AND timestamp <= TIMESTAMP(DATETIME("{calibration_end} 23:59:59", "America/Los_Angeles"))
+      AND NOT REGEXP_CONTAINS(job_name,
+        'portability')
+      AND result != 'PASSED'
+      AND result != 'SKIPPED' )),
+  reporting AS (
+  SELECT
+    RTRIM(LTRIM(REGEXP_REPLACE(filtered_test_name, r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', ''))) AS test_binary,
+    REGEXP_EXTRACT(test_name, r'GRPC_POLL_STRATEGY=(\w+)') AS poll_strategy,
+    job_name,
+    build_id,
+    timestamp
+  FROM (
+    SELECT
+      REGEXP_REPLACE(test_name, r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '') AS filtered_test_name,
+      test_name,
+      job_name,
+      build_id,
+      timestamp
+    FROM
+      `grpc-testing.jenkins_test_results.aggregate_results`
+    WHERE
+      timestamp > TIMESTAMP(DATETIME("{reporting_begin} 00:00:00", "America/Los_Angeles"))
+      AND timestamp <= TIMESTAMP(DATETIME("{reporting_end} 23:59:59", "America/Los_Angeles"))
+      AND NOT REGEXP_CONTAINS(job_name,
+        'portability')
+      AND result != 'PASSED'
+      AND result != 'SKIPPED' ))
+SELECT
+  reporting.test_binary,
+  reporting.poll_strategy,
+  reporting.job_name,
+  reporting.build_id,
+  STRING(reporting.timestamp, "America/Los_Angeles") as timestamp_MTV
+FROM
+  reporting
+LEFT JOIN
+  calibration
+ON
+  reporting.test_binary = calibration.test_binary
+WHERE
+  calibration.test_binary IS NULL
+ORDER BY
+  timestamp DESC;
diff --git a/tools/flakes/detect_flakes.py b/tools/flakes/detect_flakes.py
deleted file mode 100644
index c5c7f61..0000000
--- a/tools/flakes/detect_flakes.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Detect new flakes introduced in the last 24h hours with respect to the
-previous six days"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import datetime
-import os
-import sys
-import logging
-logging.basicConfig(format='%(asctime)s %(message)s')
-
-gcp_utils_dir = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
-sys.path.append(gcp_utils_dir)
-
-import big_query_utils
-
-def print_table(table):
-    kokoro_base_url = 'https://kokoro.corp.google.com/job/'
-    for k, v in table.items():
-      job_name = v[0]
-      build_id = v[1]
-      ts = int(float(v[2]))
-      # TODO(dgq): timezone handling is wrong. We need to determine the timezone
-      # of the computer running this script.
-      human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S PDT')
-      job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
-      full_kokoro_url = kokoro_base_url + job_path
-      print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts, full_kokoro_url))
-
-
-def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
-  """ period is one of "WEEK", "DAY", etc.
-  (see https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#date_add). """
-
-  bq = big_query_utils.create_big_query()
-  query = """
-SELECT
-  REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
-  job_name,
-  build_id,
-  timestamp
-FROM
-  [grpc-testing:jenkins_test_results.aggregate_results]
-WHERE
-    timestamp > DATE_ADD(CURRENT_DATE(), {days_lower_bound}, "DAY")
-    AND timestamp <= DATE_ADD(CURRENT_DATE(), {days_upper_bound}, "DAY")
-  AND NOT REGEXP_MATCH(job_name, '.*portability.*')
-  AND result != 'PASSED' AND result != 'SKIPPED'
-ORDER BY timestamp desc
-""".format(days_lower_bound=days_lower_bound, days_upper_bound=days_upper_bound)
-  if limit:
-    query += '\n LIMIT {}'.format(limit)
-  query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None, **query_job['jobReference']).execute(num_retries=3)
-  rows = page.get('rows')
-  if rows:
-    return {row['f'][0]['v']:
-            (row['f'][1]['v'], row['f'][2]['v'], row['f'][3]['v'])
-            for row in rows}
-  else:
-    return {}
-
-
-def get_new_flakes():
-  last_week_sans_yesterday = get_flaky_tests(-14, -1)
-  last_24 = get_flaky_tests(0, +1)
-  last_week_sans_yesterday_names = set(last_week_sans_yesterday.keys())
-  last_24_names = set(last_24.keys())
-  logging.debug('|last_week_sans_yesterday| =', len(last_week_sans_yesterday_names))
-  logging.debug('|last_24_names| =', len(last_24_names))
-  new_flakes = last_24_names - last_week_sans_yesterday_names
-  logging.debug('|new_flakes| = ', len(new_flakes))
-  return {k: last_24[k] for k in new_flakes}
-
-
-def main():
-  new_flakes = get_new_flakes()
-  if new_flakes:
-    print("Found {} new flakes:".format(len(new_flakes)))
-    print_table(new_flakes)
-  else:
-    print("No new flakes found!")
-
-
-if __name__ == '__main__':
-  main()
diff --git a/tools/gcp/utils/big_query_utils.py b/tools/gcp/utils/big_query_utils.py
index 77a5f56..3e811ca 100755
--- a/tools/gcp/utils/big_query_utils.py
+++ b/tools/gcp/utils/big_query_utils.py
@@ -28,154 +28,174 @@
 
 
 def create_big_query():
-  """Authenticates with cloud platform and gets a BiqQuery service object
+    """Authenticates with cloud platform and gets a BiqQuery service object
   """
-  creds = GoogleCredentials.get_application_default()
-  return discovery.build('bigquery', 'v2', credentials=creds, cache_discovery=False)
+    creds = GoogleCredentials.get_application_default()
+    return discovery.build(
+        'bigquery', 'v2', credentials=creds, cache_discovery=False)
 
 
 def create_dataset(biq_query, project_id, dataset_id):
-  is_success = True
-  body = {
-      'datasetReference': {
-          'projectId': project_id,
-          'datasetId': dataset_id
-      }
-  }
+    is_success = True
+    body = {
+        'datasetReference': {
+            'projectId': project_id,
+            'datasetId': dataset_id
+        }
+    }
 
-  try:
-    dataset_req = biq_query.datasets().insert(projectId=project_id, body=body)
-    dataset_req.execute(num_retries=NUM_RETRIES)
-  except HttpError as http_error:
-    if http_error.resp.status == 409:
-      print 'Warning: The dataset %s already exists' % dataset_id
-    else:
-      # Note: For more debugging info, print "http_error.content"
-      print 'Error in creating dataset: %s. Err: %s' % (dataset_id, http_error)
-      is_success = False
-  return is_success
+    try:
+        dataset_req = biq_query.datasets().insert(
+            projectId=project_id, body=body)
+        dataset_req.execute(num_retries=NUM_RETRIES)
+    except HttpError as http_error:
+        if http_error.resp.status == 409:
+            print 'Warning: The dataset %s already exists' % dataset_id
+        else:
+            # Note: For more debugging info, print "http_error.content"
+            print 'Error in creating dataset: %s. Err: %s' % (dataset_id,
+                                                              http_error)
+            is_success = False
+    return is_success
 
 
 def create_table(big_query, project_id, dataset_id, table_id, table_schema,
                  description):
-  fields = [{'name': field_name,
-             'type': field_type,
-             'description': field_description
-             } for (field_name, field_type, field_description) in table_schema]
-  return create_table2(big_query, project_id, dataset_id, table_id,
-                       fields, description)
+    fields = [{
+        'name': field_name,
+        'type': field_type,
+        'description': field_description
+    } for (field_name, field_type, field_description) in table_schema]
+    return create_table2(big_query, project_id, dataset_id, table_id, fields,
+                         description)
 
 
-def create_partitioned_table(big_query, project_id, dataset_id, table_id, table_schema,
-                             description, partition_type='DAY', expiration_ms=_EXPIRATION_MS):
-  """Creates a partitioned table. By default, a date-paritioned table is created with
+def create_partitioned_table(big_query,
+                             project_id,
+                             dataset_id,
+                             table_id,
+                             table_schema,
+                             description,
+                             partition_type='DAY',
+                             expiration_ms=_EXPIRATION_MS):
+    """Creates a partitioned table. By default, a date-paritioned table is created with
   each partition lasting 30 days after it was last modified.
   """
-  fields = [{'name': field_name,
-             'type': field_type,
-             'description': field_description
-             } for (field_name, field_type, field_description) in table_schema]
-  return create_table2(big_query, project_id, dataset_id, table_id,
-                       fields, description, partition_type, expiration_ms)
+    fields = [{
+        'name': field_name,
+        'type': field_type,
+        'description': field_description
+    } for (field_name, field_type, field_description) in table_schema]
+    return create_table2(big_query, project_id, dataset_id, table_id, fields,
+                         description, partition_type, expiration_ms)
 
 
-def create_table2(big_query, project_id, dataset_id, table_id, fields_schema,
-                 description, partition_type=None, expiration_ms=None):
-  is_success = True
+def create_table2(big_query,
+                  project_id,
+                  dataset_id,
+                  table_id,
+                  fields_schema,
+                  description,
+                  partition_type=None,
+                  expiration_ms=None):
+    is_success = True
 
-  body = {
-      'description': description,
-      'schema': {
-          'fields': fields_schema
-      },
-      'tableReference': {
-          'datasetId': dataset_id,
-          'projectId': project_id,
-          'tableId': table_id
-      }
-  }
-
-  if partition_type and expiration_ms:
-    body["timePartitioning"] = {
-      "type": partition_type,
-      "expirationMs": expiration_ms
+    body = {
+        'description': description,
+        'schema': {
+            'fields': fields_schema
+        },
+        'tableReference': {
+            'datasetId': dataset_id,
+            'projectId': project_id,
+            'tableId': table_id
+        }
     }
 
-  try:
-    table_req = big_query.tables().insert(projectId=project_id,
-                                          datasetId=dataset_id,
-                                          body=body)
-    res = table_req.execute(num_retries=NUM_RETRIES)
-    print 'Successfully created %s "%s"' % (res['kind'], res['id'])
-  except HttpError as http_error:
-    if http_error.resp.status == 409:
-      print 'Warning: Table %s already exists' % table_id
-    else:
-      print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
-      is_success = False
-  return is_success
+    if partition_type and expiration_ms:
+        body["timePartitioning"] = {
+            "type": partition_type,
+            "expirationMs": expiration_ms
+        }
+
+    try:
+        table_req = big_query.tables().insert(
+            projectId=project_id, datasetId=dataset_id, body=body)
+        res = table_req.execute(num_retries=NUM_RETRIES)
+        print 'Successfully created %s "%s"' % (res['kind'], res['id'])
+    except HttpError as http_error:
+        if http_error.resp.status == 409:
+            print 'Warning: Table %s already exists' % table_id
+        else:
+            print 'Error in creating table: %s. Err: %s' % (table_id,
+                                                            http_error)
+            is_success = False
+    return is_success
 
 
 def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
-  is_success = True
+    is_success = True
 
-  body = {
-      'schema': {
-          'fields': fields_schema
-      },
-      'tableReference': {
-          'datasetId': dataset_id,
-          'projectId': project_id,
-          'tableId': table_id
-      }
-  }
+    body = {
+        'schema': {
+            'fields': fields_schema
+        },
+        'tableReference': {
+            'datasetId': dataset_id,
+            'projectId': project_id,
+            'tableId': table_id
+        }
+    }
 
-  try:
-    table_req = big_query.tables().patch(projectId=project_id,
-                                         datasetId=dataset_id,
-                                         tableId=table_id,
-                                         body=body)
-    res = table_req.execute(num_retries=NUM_RETRIES)
-    print 'Successfully patched %s "%s"' % (res['kind'], res['id'])
-  except HttpError as http_error:
-    print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
-    is_success = False
-  return is_success
+    try:
+        table_req = big_query.tables().patch(
+            projectId=project_id,
+            datasetId=dataset_id,
+            tableId=table_id,
+            body=body)
+        res = table_req.execute(num_retries=NUM_RETRIES)
+        print 'Successfully patched %s "%s"' % (res['kind'], res['id'])
+    except HttpError as http_error:
+        print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
+        is_success = False
+    return is_success
 
 
 def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
-  is_success = True
-  body = {'rows': rows_list}
-  try:
-    insert_req = big_query.tabledata().insertAll(projectId=project_id,
-                                                 datasetId=dataset_id,
-                                                 tableId=table_id,
-                                                 body=body)
-    res = insert_req.execute(num_retries=NUM_RETRIES)
-    if res.get('insertErrors', None):
-      print 'Error inserting rows! Response: %s' % res
-      is_success = False
-  except HttpError as http_error:
-    print 'Error inserting rows to the table %s' % table_id
-    is_success = False
+    is_success = True
+    body = {'rows': rows_list}
+    try:
+        insert_req = big_query.tabledata().insertAll(
+            projectId=project_id,
+            datasetId=dataset_id,
+            tableId=table_id,
+            body=body)
+        res = insert_req.execute(num_retries=NUM_RETRIES)
+        if res.get('insertErrors', None):
+            print 'Error inserting rows! Response: %s' % res
+            is_success = False
+    except HttpError as http_error:
+        print 'Error inserting rows to the table %s' % table_id
+        is_success = False
 
-  return is_success
+    return is_success
 
 
 def sync_query_job(big_query, project_id, query, timeout=5000):
-  query_data = {'query': query, 'timeoutMs': timeout}
-  query_job = None
-  try:
-    query_job = big_query.jobs().query(
-        projectId=project_id,
-        body=query_data).execute(num_retries=NUM_RETRIES)
-  except HttpError as http_error:
-    print 'Query execute job failed with error: %s' % http_error
-    print http_error.content
-  return query_job
+    query_data = {'query': query, 'timeoutMs': timeout}
+    query_job = None
+    try:
+        query_job = big_query.jobs().query(
+            projectId=project_id,
+            body=query_data).execute(num_retries=NUM_RETRIES)
+    except HttpError as http_error:
+        print 'Query execute job failed with error: %s' % http_error
+        print http_error.content
+    return query_job
 
-  # List of (column name, column type, description) tuples
+
+# List of (column name, column type, description) tuples
 def make_row(unique_row_id, row_values_dict):
-  """row_values_dict is a dictionary of column name and column value.
+    """row_values_dict is a dictionary of column name and column value.
   """
-  return {'insertId': unique_row_id, 'json': row_values_dict}
+    return {'insertId': unique_row_id, 'json': row_values_dict}
diff --git a/tools/github/pr_latency.py b/tools/github/pr_latency.py
index 5d63583..0131e60 100644
--- a/tools/github/pr_latency.py
+++ b/tools/github/pr_latency.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Measure the time between PR creation and completion of all tests.
 
 You'll need a github API token to avoid being rate-limited. See
@@ -46,118 +45,156 @@
 
 
 def gh(url):
-  request = urllib2.Request(url)
-  if TOKEN:
-    request.add_header('Authorization', 'token {}'.format(TOKEN))
-  response = urllib2.urlopen(request)
-  return response.read()
+    request = urllib2.Request(url)
+    if TOKEN:
+        request.add_header('Authorization', 'token {}'.format(TOKEN))
+    response = urllib2.urlopen(request)
+    return response.read()
 
 
 def print_csv_header():
-  print('pr,base_time,test_time,latency_seconds,successes,failures,errors')
+    print('pr,base_time,test_time,latency_seconds,successes,failures,errors')
 
 
-def output(pr, base_time, test_time, diff_time, successes, failures, errors, mode='human'):
-  if mode == 'human':
-    print("PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}."
-          "\n\tSuccesses: {}, Failures: {}, Errors: {}".format(
-              pr, base_time, test_time, diff_time, successes, failures, errors))
-  elif mode == 'csv':
-    print(','.join([str(pr), str(base_time),
-                    str(test_time), str(int((test_time-base_time).total_seconds())),
-                    str(successes), str(failures), str(errors)]))
+def output(pr,
+           base_time,
+           test_time,
+           diff_time,
+           successes,
+           failures,
+           errors,
+           mode='human'):
+    if mode == 'human':
+        print(
+            "PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}."
+            "\n\tSuccesses: {}, Failures: {}, Errors: {}".format(
+                pr, base_time, test_time, diff_time, successes, failures,
+                errors))
+    elif mode == 'csv':
+        print(','.join([
+            str(pr), str(base_time), str(test_time), str(
+                int((test_time - base_time).total_seconds())), str(successes),
+            str(failures), str(errors)
+        ]))
 
 
 def parse_timestamp(datetime_str):
-  return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ')
+    return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ')
 
 
 def to_posix_timestamp(dt):
-  return str((dt - datetime(1970, 1, 1)).total_seconds())
+    return str((dt - datetime(1970, 1, 1)).total_seconds())
 
 
 def get_pr_data():
-  latest_prs = json.loads(gh(PRS))
-  res =  [{'number': pr['number'],
-           'created_at': parse_timestamp(pr['created_at']),
-           'updated_at': parse_timestamp(pr['updated_at']),
-           'statuses_url': pr['statuses_url']}
-          for pr in latest_prs]
-  return res
+    latest_prs = json.loads(gh(PRS))
+    res = [{
+        'number': pr['number'],
+        'created_at': parse_timestamp(pr['created_at']),
+        'updated_at': parse_timestamp(pr['updated_at']),
+        'statuses_url': pr['statuses_url']
+    } for pr in latest_prs]
+    return res
 
 
 def get_commits_data(pr_number):
-  commits = json.loads(gh(COMMITS.format(pr_number=pr_number)))
-  return {'num_commits': len(commits),
-          'most_recent_date': parse_timestamp(commits[-1]['commit']['author']['date'])}
+    commits = json.loads(gh(COMMITS.format(pr_number=pr_number)))
+    return {
+        'num_commits': len(commits),
+        'most_recent_date':
+        parse_timestamp(commits[-1]['commit']['author']['date'])
+    }
 
 
 def get_status_data(statuses_url, system):
-  status_url = statuses_url.replace('statuses', 'status')
-  statuses = json.loads(gh(status_url + '?per_page=100'))
-  successes = 0
-  failures = 0
-  errors = 0
-  latest_datetime = None
-  if not statuses: return None
-  if system == 'kokoro': string_in_target_url = 'kokoro'
-  elif system == 'jenkins': string_in_target_url = 'grpc-testing'
-  for status in statuses['statuses']:
-    if not status['target_url'] or string_in_target_url not in status['target_url']: continue  # Ignore jenkins
-    if status['state'] == 'pending': return None
-    elif status['state'] == 'success': successes += 1
-    elif status['state'] == 'failure': failures += 1
-    elif status['state'] == 'error': errors += 1
-    if not latest_datetime:
-      latest_datetime = parse_timestamp(status['updated_at'])
-    else:
-      latest_datetime = max(latest_datetime, parse_timestamp(status['updated_at']))
-  # First status is the most recent one.
-  if any([successes, failures, errors]) and sum([successes, failures, errors]) > 15:
-    return {'latest_datetime': latest_datetime,
+    status_url = statuses_url.replace('statuses', 'status')
+    statuses = json.loads(gh(status_url + '?per_page=100'))
+    successes = 0
+    failures = 0
+    errors = 0
+    latest_datetime = None
+    if not statuses: return None
+    if system == 'kokoro': string_in_target_url = 'kokoro'
+    elif system == 'jenkins': string_in_target_url = 'grpc-testing'
+    for status in statuses['statuses']:
+        if not status['target_url'] or string_in_target_url not in status[
+                'target_url']:
+            continue  # Ignore jenkins
+        if status['state'] == 'pending': return None
+        elif status['state'] == 'success': successes += 1
+        elif status['state'] == 'failure': failures += 1
+        elif status['state'] == 'error': errors += 1
+        if not latest_datetime:
+            latest_datetime = parse_timestamp(status['updated_at'])
+        else:
+            latest_datetime = max(latest_datetime,
+                                  parse_timestamp(status['updated_at']))
+    # First status is the most recent one.
+    if any([successes, failures, errors]) and sum(
+        [successes, failures, errors]) > 15:
+        return {
+            'latest_datetime': latest_datetime,
             'successes': successes,
             'failures': failures,
-            'errors': errors}
-  else: return None
+            'errors': errors
+        }
+    else:
+        return None
 
 
 def build_args_parser():
-  import argparse
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--format', type=str, choices=['human', 'csv'],
-                      default='human',
-                      help='Output format: are you a human or a machine?')
-  parser.add_argument('--system', type=str, choices=['jenkins', 'kokoro'],
-                      required=True, help='Consider only the given CI system')
-  parser.add_argument('--token', type=str, default='',
-                      help='GitHub token to use its API with a higher rate limit')
-  return parser
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--format',
+        type=str,
+        choices=['human', 'csv'],
+        default='human',
+        help='Output format: are you a human or a machine?')
+    parser.add_argument(
+        '--system',
+        type=str,
+        choices=['jenkins', 'kokoro'],
+        required=True,
+        help='Consider only the given CI system')
+    parser.add_argument(
+        '--token',
+        type=str,
+        default='',
+        help='GitHub token, used to access the GitHub API with a higher rate limit')
+    return parser
 
 
 def main():
-  import sys
-  global TOKEN
-  args_parser = build_args_parser()
-  args = args_parser.parse_args()
-  TOKEN = args.token
-  if args.format == 'csv': print_csv_header()
-  for pr_data in get_pr_data():
-    commit_data = get_commits_data(pr_data['number'])
-    # PR with a single commit -> use the PRs creation time.
-    # else -> use the latest commit's date.
-    base_timestamp = pr_data['updated_at']
-    if commit_data['num_commits'] > 1:
-      base_timestamp = commit_data['most_recent_date']
-    else:
-      base_timestamp = pr_data['created_at']
-    last_status = get_status_data(pr_data['statuses_url'], args.system)
-    if last_status:
-      diff = last_status['latest_datetime'] - base_timestamp
-      if diff < timedelta(hours=5):
-        output(pr_data['number'], base_timestamp, last_status['latest_datetime'],
-               diff, last_status['successes'], last_status['failures'],
-               last_status['errors'], mode=args.format)
+    import sys
+    global TOKEN
+    args_parser = build_args_parser()
+    args = args_parser.parse_args()
+    TOKEN = args.token
+    if args.format == 'csv': print_csv_header()
+    for pr_data in get_pr_data():
+        commit_data = get_commits_data(pr_data['number'])
+        # PR with a single commit -> use the PR's creation time.
+        # else -> use the latest commit's date.
+        base_timestamp = pr_data['updated_at']
+        if commit_data['num_commits'] > 1:
+            base_timestamp = commit_data['most_recent_date']
+        else:
+            base_timestamp = pr_data['created_at']
+        last_status = get_status_data(pr_data['statuses_url'], args.system)
+        if last_status:
+            diff = last_status['latest_datetime'] - base_timestamp
+            if diff < timedelta(hours=5):
+                output(
+                    pr_data['number'],
+                    base_timestamp,
+                    last_status['latest_datetime'],
+                    diff,
+                    last_status['successes'],
+                    last_status['failures'],
+                    last_status['errors'],
+                    mode=args.format)
 
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
index c43ac0e..a767218 100644
--- a/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
@@ -50,7 +50,7 @@
   --genrule_strategy=remote  \
   --experimental_strict_action_env=true \
   --experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
-  --crosstool_top=@bazel_toolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
+  --crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
   --define GRPC_PORT_ISOLATED_RUNTIME=1 \
   -c dbg \
   -- //test/...
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
index b106b71..defe664 100644
--- a/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
@@ -50,7 +50,7 @@
   --genrule_strategy=remote  \
   --experimental_strict_action_env=true \
   --experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
-  --crosstool_top=@bazel_toolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
+  --crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
   --define GRPC_PORT_ISOLATED_RUNTIME=1 \
   -c opt \
   -- //test/...
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py
index 7281307..71d3a79 100644
--- a/tools/interop_matrix/client_matrix.py
+++ b/tools/interop_matrix/client_matrix.py
@@ -15,29 +15,34 @@
 
 # Dictionaries used for client matrix testing.
 
+
 def get_github_repo(lang):
-  return {
-      'go': 'git@github.com:grpc/grpc-go.git',
-      'java': 'git@github.com:grpc/grpc-java.git',
-      'node': 'git@github.com:grpc/grpc-node.git',
-      # all other languages use the grpc.git repo.
-  }.get(lang, 'git@github.com:grpc/grpc.git')
+    return {
+        'go': 'git@github.com:grpc/grpc-go.git',
+        'java': 'git@github.com:grpc/grpc-java.git',
+        'node': 'git@github.com:grpc/grpc-node.git',
+        # all other languages use the grpc.git repo.
+    }.get(lang, 'git@github.com:grpc/grpc.git')
+
 
 def get_release_tags(lang):
-  return map(lambda r: get_release_tag_name(r), LANG_RELEASE_MATRIX[lang])
+    return map(lambda r: get_release_tag_name(r), LANG_RELEASE_MATRIX[lang])
+
 
 def get_release_tag_name(release_info):
-  assert len(release_info.keys()) == 1
-  return release_info.keys()[0]
+    assert len(release_info.keys()) == 1
+    return release_info.keys()[0]
+
 
 def should_build_docker_interop_image_from_release_tag(lang):
-  if lang in ['go', 'java', 'node']:
-    return False
-  return True
+    if lang in ['go', 'java', 'node']:
+        return False
+    return True
+
 
 # Dictionary of runtimes per language
 LANG_RUNTIME_MATRIX = {
-    'cxx': ['cxx'],             # This is actually debian8.
+    'cxx': ['cxx'],  # This is actually debian8.
     'go': ['go1.7', 'go1.8'],
     'java': ['java_oracle8'],
     'python': ['python'],
@@ -51,81 +56,197 @@
 # a release tag pointing to the latest build of the branch.
 LANG_RELEASE_MATRIX = {
     'cxx': [
-        {'v1.0.1': None},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
-        {'v1.7.2': None},
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'go': [
-        {'v1.0.5': None},
-        {'v1.2.1': None},
-        {'v1.3.0': None},
-        {'v1.4.2': None},
-        {'v1.5.2': None},
-        {'v1.6.0': None},
-        {'v1.7.4': None},
-        {'v1.8.1': None},
+        {
+            'v1.0.5': None
+        },
+        {
+            'v1.2.1': None
+        },
+        {
+            'v1.3.0': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.5.2': None
+        },
+        {
+            'v1.6.0': None
+        },
+        {
+            'v1.7.4': None
+        },
+        {
+            'v1.8.2': None
+        },
     ],
     'java': [
-        {'v1.0.3': None},
-        {'v1.1.2': None},
-        {'v1.2.0': None},
-        {'v1.3.1': None},
-        {'v1.4.0': None},
-        {'v1.5.0': None},
-        {'v1.6.1': None},
-        {'v1.7.0': None},
-        {'v1.8.0': None},
+        {
+            'v1.0.3': None
+        },
+        {
+            'v1.1.2': None
+        },
+        {
+            'v1.2.0': None
+        },
+        {
+            'v1.3.1': None
+        },
+        {
+            'v1.4.0': None
+        },
+        {
+            'v1.5.0': None
+        },
+        {
+            'v1.6.1': None
+        },
+        {
+            'v1.7.0': None
+        },
+        {
+            'v1.8.0': None
+        },
     ],
     'python': [
-        {'v1.0.x': None},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
-        {'v1.7.2': None},
+        {
+            'v1.0.x': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'node': [
-        {'v1.0.1': None},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
         #{'v1.7.1': None}, Failing tests
     ],
     'ruby': [
-        {'v1.0.1': {'patch': [
-            'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
-            'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
-        ]}},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
-        {'v1.7.2': None},
+        {
+            'v1.0.1': {
+                'patch': [
+                    'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
+                    'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
+                ]
+            }
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'php': [
-        {'v1.0.1': None},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
-        {'v1.7.2': None},
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
-   'csharp': [
+    'csharp': [
         #{'v1.0.1': None},
-        {'v1.1.4': None},
-        {'v1.2.5': None},
-        {'v1.3.9': None},
-        {'v1.4.2': None},
-        {'v1.6.6': None},
-        {'v1.7.2': None},
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
 }
diff --git a/tools/interop_matrix/create_matrix_images.py b/tools/interop_matrix/create_matrix_images.py
index a292368..ad7bb59 100755
--- a/tools/interop_matrix/create_matrix_images.py
+++ b/tools/interop_matrix/create_matrix_images.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Build and upload docker images to Google Container Registry per matrix."""
 
 from __future__ import print_function
@@ -29,8 +28,8 @@
 # Language Runtime Matrix
 import client_matrix
 
-python_util_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../run_tests/python_utils'))
+python_util_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
 sys.path.append(python_util_dir)
 import dockerjob
 import jobset
@@ -38,267 +37,305 @@
 _IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
 _LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
 # All gRPC release tags, flattened, deduped and sorted.
-_RELEASES = sorted(list(set(
-    client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
+_RELEASES = sorted(
+    list(
+        set(
+            client_matrix.get_release_tag_name(info)
+            for lang in client_matrix.LANG_RELEASE_MATRIX.values()
+            for info in lang)))
 
 # Destination directory inside docker image to keep extra info from build time.
 _BUILD_INFO = '/var/local/build_info'
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('--gcr_path',
-                  default='gcr.io/grpc-testing',
-                  help='Path of docker images in Google Container Registry')
+argp.add_argument(
+    '--gcr_path',
+    default='gcr.io/grpc-testing',
+    help='Path of docker images in Google Container Registry')
 
-argp.add_argument('--release',
-                  default='master',
-                  choices=['all', 'master'] + _RELEASES,
-                  help='github commit tag to checkout.  When building all '
-                  'releases defined in client_matrix.py, use "all". Valid only '
-                  'with --git_checkout.')
+argp.add_argument(
+    '--release',
+    default='master',
+    choices=['all', 'master'] + _RELEASES,
+    help='GitHub commit tag to check out.  When building all '
+    'releases defined in client_matrix.py, use "all". Valid only '
+    'with --git_checkout.')
 
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Test languages to build docker images for.')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Test languages to build docker images for.')
 
-argp.add_argument('--git_checkout',
-                  action='store_true',
-                  help='Use a separate git clone tree for building grpc stack. '
-                  'Required when using --release flag.  By default, current'
-                  'tree and the sibling will be used for building grpc stack.')
+argp.add_argument(
+    '--git_checkout',
+    action='store_true',
+    help='Use a separate git clone tree for building grpc stack. '
+    'Required when using --release flag.  By default, the current '
+    'tree and the sibling will be used for building grpc stack.')
 
-argp.add_argument('--git_checkout_root',
-                  default='/export/hda3/tmp/grpc_matrix',
-                  help='Directory under which grpc-go/java/main repo will be '
-                  'cloned.  Valid only with --git_checkout.')
+argp.add_argument(
+    '--git_checkout_root',
+    default='/export/hda3/tmp/grpc_matrix',
+    help='Directory under which grpc-go/java/main repo will be '
+    'cloned.  Valid only with --git_checkout.')
 
-argp.add_argument('--keep',
-                  action='store_true',
-                  help='keep the created local images after uploading to GCR')
+argp.add_argument(
+    '--keep',
+    action='store_true',
+    help='keep the created local images after uploading to GCR')
 
-argp.add_argument('--reuse_git_root',
-                  default=False,
-                  action='store_const',
-                  const=True,                  
-                  help='reuse the repo dir. If False, the existing git root '
-                  'directory will removed before a clean checkout, because '
-                  'reusing the repo can cause git checkout error if you switch '
-                  'between releases.')
-
+argp.add_argument(
+    '--reuse_git_root',
+    default=False,
+    action='store_const',
+    const=True,
+    help='reuse the repo dir. If False, the existing git root '
+    'directory will be removed before a clean checkout, because '
+    'reusing the repo can cause a git checkout error if you switch '
+    'between releases.')
 
 args = argp.parse_args()
 
+
 def add_files_to_image(image, with_files, label=None):
-  """Add files to a docker image.
+    """Add files to a docker image.
 
   image: docker image name, i.e. grpc_interop_java:26328ad8
   with_files: additional files to include in the docker image.
   label: label string to attach to the image.
   """
-  tag_idx = image.find(':')
-  if tag_idx == -1:
-    jobset.message('FAILED', 'invalid docker image %s' % image, do_newline=True)
-    sys.exit(1)
-  orig_tag = '%s_' % image
-  subprocess.check_output(['docker', 'tag', image, orig_tag])
+    tag_idx = image.find(':')
+    if tag_idx == -1:
+        jobset.message(
+            'FAILED', 'invalid docker image %s' % image, do_newline=True)
+        sys.exit(1)
+    orig_tag = '%s_' % image
+    subprocess.check_output(['docker', 'tag', image, orig_tag])
 
-  lines = ['FROM ' + orig_tag]
-  if label:
-    lines.append('LABEL %s' % label)
+    lines = ['FROM ' + orig_tag]
+    if label:
+        lines.append('LABEL %s' % label)
 
-  temp_dir = tempfile.mkdtemp()
-  atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))
+    temp_dir = tempfile.mkdtemp()
+    atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))
 
-  # Copy with_files inside the tmp directory, which will be the docker build
-  # context.
-  for f in with_files:
-    shutil.copy(f, temp_dir)
-    lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))
+    # Copy with_files inside the tmp directory, which will be the docker build
+    # context.
+    for f in with_files:
+        shutil.copy(f, temp_dir)
+        lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))
 
-  # Create a Dockerfile.
-  with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
-    f.write('\n'.join(lines))
+    # Create a Dockerfile.
+    with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
+        f.write('\n'.join(lines))
 
-  jobset.message('START', 'Repackaging %s' % image, do_newline=True)
-  build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
-  subprocess.check_output(build_cmd)
-  dockerjob.remove_image(orig_tag, skip_nonexistent=True)
+    jobset.message('START', 'Repackaging %s' % image, do_newline=True)
+    build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
+    subprocess.check_output(build_cmd)
+    dockerjob.remove_image(orig_tag, skip_nonexistent=True)
+
 
 def build_image_jobspec(runtime, env, gcr_tag, stack_base):
-  """Build interop docker image for a language with runtime.
+    """Build interop docker image for a language with runtime.
 
   runtime: a <lang><version> string, for example go1.8.
   env:     dictionary of env to passed to the build script.
   gcr_tag: the tag for the docker image (i.e. v1.3.0).
   stack_base: the local gRPC repo path.
   """
-  basename = 'grpc_interop_%s' % runtime
-  tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
-  build_env = {
-      'INTEROP_IMAGE': tag,
-      'BASE_NAME': basename,
-      'TTY_FLAG': '-t'
-  }
-  build_env.update(env)
-  image_builder_path = _IMAGE_BUILDER
-  if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
-    image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
-  build_job = jobset.JobSpec(
-          cmdline=[image_builder_path],
-          environ=build_env,
-          shortname='build_docker_%s' % runtime,
-          timeout_seconds=30*60)
-  build_job.tag = tag
-  return build_job
+    basename = 'grpc_interop_%s' % runtime
+    tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
+    build_env = {'INTEROP_IMAGE': tag, 'BASE_NAME': basename, 'TTY_FLAG': '-t'}
+    build_env.update(env)
+    image_builder_path = _IMAGE_BUILDER
+    if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
+        image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
+    build_job = jobset.JobSpec(
+        cmdline=[image_builder_path],
+        environ=build_env,
+        shortname='build_docker_%s' % runtime,
+        timeout_seconds=30 * 60)
+    build_job.tag = tag
+    return build_job
+
 
 def build_all_images_for_lang(lang):
-  """Build all docker images for a language across releases and runtimes."""
-  if not args.git_checkout:
-    if args.release != 'master':
-      print('WARNING: --release is set but will be ignored\n')
-    releases = ['master']
-  else:
-    if args.release == 'all':
-      releases = client_matrix.get_release_tags(lang)
+    """Build all docker images for a language across releases and runtimes."""
+    if not args.git_checkout:
+        if args.release != 'master':
+            print('WARNING: --release is set but will be ignored\n')
+        releases = ['master']
     else:
-      # Build a particular release.
-      if args.release not in ['master'] + client_matrix.get_release_tags(lang):
-        jobset.message('SKIPPED',
-                       '%s for %s is not defined' % (args.release, lang),
-                       do_newline=True)
-        return []
-      releases = [args.release]
+        if args.release == 'all':
+            releases = client_matrix.get_release_tags(lang)
+        else:
+            # Build a particular release.
+            if args.release not in ['master'] + client_matrix.get_release_tags(
+                    lang):
+                jobset.message(
+                    'SKIPPED',
+                    '%s for %s is not defined' % (args.release, lang),
+                    do_newline=True)
+                return []
+            releases = [args.release]
 
-  images = []
-  for release in releases:
-    images += build_all_images_for_release(lang, release)
-  jobset.message('SUCCESS',
-                 'All docker images built for %s at %s.' % (lang, releases),
-                 do_newline=True)
-  return images
+    images = []
+    for release in releases:
+        images += build_all_images_for_release(lang, release)
+    jobset.message(
+        'SUCCESS',
+        'All docker images built for %s at %s.' % (lang, releases),
+        do_newline=True)
+    return images
+
 
 def build_all_images_for_release(lang, release):
-  """Build all docker images for a release across all runtimes."""
-  docker_images = []
-  build_jobs = []
+    """Build all docker images for a release across all runtimes."""
+    docker_images = []
+    build_jobs = []
 
-  env = {}
-  # If we not using current tree or the sibling for grpc stack, do checkout.
-  stack_base = ''
-  if args.git_checkout:
-    stack_base = checkout_grpc_stack(lang, release)
-    var ={'go': 'GRPC_GO_ROOT', 'java': 'GRPC_JAVA_ROOT', 'node': 'GRPC_NODE_ROOT'}.get(lang, 'GRPC_ROOT')
-    env[var] = stack_base
+    env = {}
+    # If we are not using the current tree or the sibling for the grpc stack,
+    # do a checkout.
+    stack_base = ''
+    if args.git_checkout:
+        stack_base = checkout_grpc_stack(lang, release)
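+        # Point the per-language build script at the checked-out stack via a
+        # language-specific environment variable.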
+        var = {
+            'go': 'GRPC_GO_ROOT',
+            'java': 'GRPC_JAVA_ROOT',
+            'node': 'GRPC_NODE_ROOT'
+        }.get(lang, 'GRPC_ROOT')
+        env[var] = stack_base
 
-  for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
-    job = build_image_jobspec(runtime, env, release, stack_base)
-    docker_images.append(job.tag)
-    build_jobs.append(job)
+    for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
+        job = build_image_jobspec(runtime, env, release, stack_base)
+        docker_images.append(job.tag)
+        build_jobs.append(job)
 
-  jobset.message('START', 'Building interop docker images.', do_newline=True)
-  print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+    jobset.message('START', 'Building interop docker images.', do_newline=True)
+    print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
 
-  num_failures, _ = jobset.run(
-      build_jobs, newline_on_success=True, maxjobs=multiprocessing.cpu_count())
-  if num_failures:
-    jobset.message('FAILED', 'Failed to build interop docker images.',
-                   do_newline=True)
-    docker_images_cleanup.extend(docker_images)
-    sys.exit(1)
+    num_failures, _ = jobset.run(
+        build_jobs,
+        newline_on_success=True,
+        maxjobs=multiprocessing.cpu_count())
+    if num_failures:
+        jobset.message(
+            'FAILED', 'Failed to build interop docker images.', do_newline=True)
+        docker_images_cleanup.extend(docker_images)
+        sys.exit(1)
 
-  jobset.message('SUCCESS',
-                 'All docker images built for %s at %s.' % (lang, release),
-                 do_newline=True)
+    jobset.message(
+        'SUCCESS',
+        'All docker images built for %s at %s.' % (lang, release),
+        do_newline=True)
 
-  if release != 'master':
-    commit_log = os.path.join(stack_base, 'commit_log')
-    if os.path.exists(commit_log):
-      for image in docker_images:
-        add_files_to_image(image, [commit_log], 'release=%s' % release)
-  return docker_images
+    if release != 'master':
+        commit_log = os.path.join(stack_base, 'commit_log')
+        if os.path.exists(commit_log):
+            for image in docker_images:
+                add_files_to_image(image, [commit_log], 'release=%s' % release)
+    return docker_images
+
 
 def cleanup():
-  if not args.keep:
-    for image in docker_images_cleanup:
-      dockerjob.remove_image(image, skip_nonexistent=True)
+    if not args.keep:
+        for image in docker_images_cleanup:
+            dockerjob.remove_image(image, skip_nonexistent=True)
+
 
 docker_images_cleanup = []
 atexit.register(cleanup)
 
+
 def maybe_apply_patches_on_git_tag(stack_base, lang, release):
-  files_to_patch = []
-  for release_info in client_matrix.LANG_RELEASE_MATRIX[lang]:
-    if client_matrix.get_release_tag_name(release_info) == release:
-      files_to_patch = release_info[release].get('patch')
-      break
-  if not files_to_patch:
-    return
-  patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
-  patch_file = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                            patch_file_relative_path))
-  if not os.path.exists(patch_file):
-    jobset.message('FAILED', 'expected patch file |%s| to exist' % patch_file)
-    sys.exit(1)
-  subprocess.check_output(
-      ['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
-  for repo_relative_path in files_to_patch:
+    files_to_patch = []
+    for release_info in client_matrix.LANG_RELEASE_MATRIX[lang]:
+        if client_matrix.get_release_tag_name(release_info) == release:
+            if release_info[release] is not None:
+                files_to_patch = release_info[release].get('patch')
+                break
+    if not files_to_patch:
+        return
+    patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
+    patch_file = os.path.abspath(
+        os.path.join(os.path.dirname(__file__), patch_file_relative_path))
+    if not os.path.exists(patch_file):
+        jobset.message('FAILED',
+                       'expected patch file |%s| to exist' % patch_file)
+        sys.exit(1)
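+    # Apply the patch, stage the listed files and commit the result on top of
+    # the checked-out release tag.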
     subprocess.check_output(
-        ['git', 'add', repo_relative_path],
+        ['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
+    for repo_relative_path in files_to_patch:
+        subprocess.check_output(
+            ['git', 'add', repo_relative_path],
+            cwd=stack_base,
+            stderr=subprocess.STDOUT)
+    subprocess.check_output(
+        [
+            'git', 'commit', '-m',
+            ('Hack performed on top of %s git '
+             'tag in order to build and run the %s '
+             'interop tests on that tag.' % (lang, release))
+        ],
         cwd=stack_base,
         stderr=subprocess.STDOUT)
-  subprocess.check_output(
-      ['git', 'commit', '-m', ('Hack performed on top of %s git '
-                               'tag in order to build and run the %s '
-                               'interop tests on that tag.' % (lang, release))],
-      cwd=stack_base, stderr=subprocess.STDOUT)
+
 
 def checkout_grpc_stack(lang, release):
-  """Invokes 'git check' for the lang/release and returns directory created."""
-  assert args.git_checkout and args.git_checkout_root
+    """Invokes 'git check' for the lang/release and returns directory created."""
+    assert args.git_checkout and args.git_checkout_root
 
-  if not os.path.exists(args.git_checkout_root):
-    os.makedirs(args.git_checkout_root)
+    if not os.path.exists(args.git_checkout_root):
+        os.makedirs(args.git_checkout_root)
 
-  repo = client_matrix.get_github_repo(lang)
-  # Get the subdir name part of repo
-  # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
-  repo_dir = os.path.splitext(os.path.basename(repo))[0]
-  stack_base = os.path.join(args.git_checkout_root, repo_dir)
+    repo = client_matrix.get_github_repo(lang)
+    # Get the subdir name part of repo
+    # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
+    repo_dir = os.path.splitext(os.path.basename(repo))[0]
+    stack_base = os.path.join(args.git_checkout_root, repo_dir)
 
-  # Clean up leftover repo dir if necessary.
-  if not args.reuse_git_root and os.path.exists(stack_base):
-    jobset.message('START', 'Removing git checkout root.', do_newline=True)
-    shutil.rmtree(stack_base)
+    # Clean up leftover repo dir if necessary.
+    if not args.reuse_git_root and os.path.exists(stack_base):
+        jobset.message('START', 'Removing git checkout root.', do_newline=True)
+        shutil.rmtree(stack_base)
 
-  if not os.path.exists(stack_base):
-    subprocess.check_call(['git', 'clone', '--recursive', repo],
-                          cwd=os.path.dirname(stack_base))
+    if not os.path.exists(stack_base):
+        subprocess.check_call(
+            ['git', 'clone', '--recursive', repo],
+            cwd=os.path.dirname(stack_base))
 
-  # git checkout.
-  jobset.message('START', 'git checkout %s from %s' % (release, stack_base),
-                 do_newline=True)
-  # We should NEVER do checkout on current tree !!!
-  assert not os.path.dirname(__file__).startswith(stack_base)
-  output = subprocess.check_output(
-      ['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
-  maybe_apply_patches_on_git_tag(stack_base, lang, release)
-  commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
-  jobset.message('SUCCESS', 'git checkout', 
-                 '%s: %s' % (str(output), commit_log), 
-                 do_newline=True)
+    # git checkout.
+    jobset.message(
+        'START',
+        'git checkout %s from %s' % (release, stack_base),
+        do_newline=True)
+    # We should NEVER do a checkout on top of the current tree!
+    assert not os.path.dirname(__file__).startswith(stack_base)
+    output = subprocess.check_output(
+        ['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
+    maybe_apply_patches_on_git_tag(stack_base, lang, release)
+    commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
+    jobset.message(
+        'SUCCESS',
+        'git checkout',
+        '%s: %s' % (str(output), commit_log),
+        do_newline=True)
 
-  # Write git log to commit_log so it can be packaged with the docker image.
-  with open(os.path.join(stack_base, 'commit_log'), 'w') as f:
-    f.write(commit_log)
-  return stack_base
+    # Write git log to commit_log so it can be packaged with the docker image.
+    with open(os.path.join(stack_base, 'commit_log'), 'w') as f:
+        f.write(commit_log)
+    return stack_base
+
 
 languages = args.language if args.language != ['all'] else _LANGUAGES
 for lang in languages:
-  docker_images = build_all_images_for_lang(lang)
-  for image in docker_images:
-    jobset.message('START', 'Uploading %s' % image, do_newline=True)
-    # docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
-    assert image.startswith(args.gcr_path) and image.find(':') != -1
+    docker_images = build_all_images_for_lang(lang)
+    for image in docker_images:
+        jobset.message('START', 'Uploading %s' % image, do_newline=True)
+        # docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
+        assert image.startswith(args.gcr_path) and image.find(':') != -1
 
-    subprocess.call(['gcloud', 'docker', '--', 'push', image])
+        subprocess.call(['gcloud', 'docker', '--', 'push', image])
diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py
index 1bc35d9..ff42bd7 100755
--- a/tools/interop_matrix/run_interop_matrix_tests.py
+++ b/tools/interop_matrix/run_interop_matrix_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run tests using docker images in Google Container Registry per matrix."""
 
 from __future__ import print_function
@@ -30,8 +29,8 @@
 # Language Runtime Matrix
 import client_matrix
 
-python_util_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../run_tests/python_utils'))
+python_util_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
 sys.path.append(python_util_dir)
 import dockerjob
 import jobset
@@ -40,46 +39,56 @@
 
 _LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
 # All gRPC release tags, flattened, deduped and sorted.
-_RELEASES = sorted(list(set(
-    client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
+_RELEASES = sorted(
+    list(
+        set(
+            client_matrix.get_release_tag_name(info)
+            for lang in client_matrix.LANG_RELEASE_MATRIX.values()
+            for info in lang)))
 _TEST_TIMEOUT = 30
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--gcr_path',
-                  default='gcr.io/grpc-testing',
-                  help='Path of docker images in Google Container Registry')
-argp.add_argument('--release',
-                  default='all',
-                  choices=['all', 'master'] + _RELEASES,
-                  help='Release tags to test.  When testing all '
-                  'releases defined in client_matrix.py, use "all".')
+argp.add_argument(
+    '--gcr_path',
+    default='gcr.io/grpc-testing',
+    help='Path of docker images in Google Container Registry')
+argp.add_argument(
+    '--release',
+    default='all',
+    choices=['all', 'master'] + _RELEASES,
+    help='Release tags to test.  When testing all '
+    'releases defined in client_matrix.py, use "all".')
 
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Languages to test')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Languages to test')
 
-argp.add_argument('--keep',
-                  action='store_true',
-                  help='keep the created local images after finishing the tests.')
+argp.add_argument(
+    '--keep',
+    action='store_true',
+    help='keep the created local images after finishing the tests.')
 
-argp.add_argument('--report_file',
-                  default='report.xml',
-                  help='The result file to create.')
+argp.add_argument(
+    '--report_file', default='report.xml', help='The result file to create.')
 
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help=('Allow flaky tests to show as passing (re-runs failed '
-                        'tests up to five times)'))
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help=('Allow flaky tests to show as passing (re-runs failed '
+          'tests up to five times)'))
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
 
 args = argp.parse_args()
 
@@ -87,138 +96,153 @@
 
 
 def find_all_images_for_lang(lang):
-  """Find docker images for a language across releases and runtimes.
+    """Find docker images for a language across releases and runtimes.
 
   Returns dictionary of list of (<tag>, <image-full-path>) keyed by runtime.
   """
-  # Find all defined releases.
-  if args.release == 'all':
-    releases = ['master'] + client_matrix.get_release_tags(lang)
-  else:
-    # Look for a particular release.
-    if args.release not in ['master'] + client_matrix.get_release_tags(lang):
-      jobset.message('SKIPPED',
-                     '%s for %s is not defined' % (args.release, lang),
-                     do_newline=True)
-      return {}
-    releases = [args.release]
+    # Find all defined releases.
+    if args.release == 'all':
+        releases = ['master'] + client_matrix.get_release_tags(lang)
+    else:
+        # Look for a particular release.
+        if args.release not in ['master'] + client_matrix.get_release_tags(
+                lang):
+            jobset.message(
+                'SKIPPED',
+                '%s for %s is not defined' % (args.release, lang),
+                do_newline=True)
+            return {}
+        releases = [args.release]
 
-  # Images tuples keyed by runtime.
-  images = {}
-  for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
-    image_path = '%s/grpc_interop_%s' % (args.gcr_path, runtime)
-    output = subprocess.check_output(['gcloud', 'beta', 'container', 'images',
-                                      'list-tags', '--format=json', image_path])
-    docker_image_list = json.loads(output)
-    # All images should have a single tag or no tag.
-    # TODO(adelez): Remove tagless images.
-    tags = [i['tags'][0] for i in docker_image_list if i['tags']]
-    jobset.message('START', 'Found images for %s: %s' % (image_path, tags),
-                   do_newline=True)
-    skipped = len(docker_image_list) - len(tags)
-    jobset.message('SKIPPED', 'Skipped images (no-tag/unknown-tag): %d' % skipped,
-                   do_newline=True)
-    # Filter tags based on the releases.
-    images[runtime] = [(tag,'%s:%s' % (image_path,tag)) for tag in tags if
-                       tag in releases]
-  return images
+    # Image tuples keyed by runtime.
+    images = {}
+    for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
+        image_path = '%s/grpc_interop_%s' % (args.gcr_path, runtime)
+        output = subprocess.check_output([
+            'gcloud', 'beta', 'container', 'images', 'list-tags',
+            '--format=json', image_path
+        ])
+        docker_image_list = json.loads(output)
+        # All images should have a single tag or no tag.
+        # TODO(adelez): Remove tagless images.
+        tags = [i['tags'][0] for i in docker_image_list if i['tags']]
+        jobset.message(
+            'START',
+            'Found images for %s: %s' % (image_path, tags),
+            do_newline=True)
+        skipped = len(docker_image_list) - len(tags)
+        jobset.message(
+            'SKIPPED',
+            'Skipped images (no-tag/unknown-tag): %d' % skipped,
+            do_newline=True)
+        # Filter tags based on the releases.
+        images[runtime] = [(tag, '%s:%s' % (image_path, tag)) for tag in tags
+                           if tag in releases]
+    return images
+
 
 # caches test cases (list of JobSpec) loaded from file.  Keyed by lang and runtime.
 def find_test_cases(lang, runtime, release, suite_name):
-  """Returns the list of test cases from testcase files per lang/release."""
-  file_tmpl = os.path.join(os.path.dirname(__file__), 'testcases/%s__%s')
-  testcase_release = release
-  filename_prefix = lang
-  if lang == 'csharp':
-    filename_prefix = runtime
-  if not os.path.exists(file_tmpl % (filename_prefix, release)):
-    testcase_release = 'master'
-  testcases = file_tmpl % (filename_prefix, testcase_release)
+    """Returns the list of test cases from testcase files per lang/release."""
+    file_tmpl = os.path.join(os.path.dirname(__file__), 'testcases/%s__%s')
+    testcase_release = release
+    filename_prefix = lang
+    if lang == 'csharp':
+        filename_prefix = runtime
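+    # Fall back to the master testcase file when no release-specific one exists.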
+    if not os.path.exists(file_tmpl % (filename_prefix, release)):
+        testcase_release = 'master'
+    testcases = file_tmpl % (filename_prefix, testcase_release)
 
-  job_spec_list=[]
-  try:
-    with open(testcases) as f:
-      # Only line start with 'docker run' are test cases.
-      for line in f.readlines():
-        if line.startswith('docker run'):
-          m = re.search('--test_case=(.*)"', line)
-          shortname = m.group(1) if m else 'unknown_test'
-          m = re.search('--server_host_override=(.*).sandbox.googleapis.com', 
+    job_spec_list = []
+    try:
+        with open(testcases) as f:
+            # Only lines starting with 'docker run' are test cases.
+            for line in f.readlines():
+                if line.startswith('docker run'):
+                    m = re.search('--test_case=(.*)"', line)
+                    shortname = m.group(1) if m else 'unknown_test'
+                    m = re.search(
+                        '--server_host_override=(.*).sandbox.googleapis.com',
                         line)
-          server = m.group(1) if m else 'unknown_server'
-          spec = jobset.JobSpec(cmdline=line,
-                                shortname='%s:%s:%s:%s' % (suite_name, lang, 
-                                                           server, shortname),
-                                timeout_seconds=_TEST_TIMEOUT,
-                                shell=True,
-                                flake_retries=5 if args.allow_flakes else 0)
-          job_spec_list.append(spec)
-      jobset.message('START',
-                     'Loaded %s tests from %s' % (len(job_spec_list), testcases),
-                     do_newline=True)
-  except IOError as err:
-    jobset.message('FAILED', err, do_newline=True)
-  return job_spec_list
+                    server = m.group(1) if m else 'unknown_server'
+                    spec = jobset.JobSpec(
+                        cmdline=line,
+                        shortname='%s:%s:%s:%s' % (suite_name, lang, server,
+                                                   shortname),
+                        timeout_seconds=_TEST_TIMEOUT,
+                        shell=True,
+                        flake_retries=5 if args.allow_flakes else 0)
+                    job_spec_list.append(spec)
+            jobset.message(
+                'START',
+                'Loaded %s tests from %s' % (len(job_spec_list), testcases),
+                do_newline=True)
+    except IOError as err:
+        jobset.message('FAILED', err, do_newline=True)
+    return job_spec_list
+
 
 _xml_report_tree = report_utils.new_junit_xml_tree()
+
+
 def run_tests_for_lang(lang, runtime, images):
-  """Find and run all test cases for a language.
+    """Find and run all test cases for a language.
 
   images is a list of (<release-tag>, <image-full-path>) tuple.
   """
-  total_num_failures = 0
-  for image_tuple in images:
-    release, image = image_tuple
-    jobset.message('START', 'Testing %s' % image, do_newline=True)
-    # Download the docker image before running each test case.
-    subprocess.check_call(['gcloud', 'docker', '--', 'pull', image])
-    suite_name = '%s__%s_%s' % (lang, runtime, release)
-    job_spec_list = find_test_cases(lang, runtime, release, suite_name)
-    
-    if not job_spec_list:  
-      jobset.message('FAILED', 'No test cases were found.', do_newline=True)
-      return 1
+    total_num_failures = 0
+    for image_tuple in images:
+        release, image = image_tuple
+        jobset.message('START', 'Testing %s' % image, do_newline=True)
+        # Download the docker image before running each test case.
+        subprocess.check_call(['gcloud', 'docker', '--', 'pull', image])
+        suite_name = '%s__%s_%s' % (lang, runtime, release)
+        job_spec_list = find_test_cases(lang, runtime, release, suite_name)
 
-    num_failures, resultset = jobset.run(job_spec_list,
-                                         newline_on_success=True,
-                                         add_env={'docker_image':image},
-                                         maxjobs=args.jobs)
-    if args.bq_result_table and resultset:
-      upload_test_results.upload_interop_results_to_bq(
-          resultset, args.bq_result_table, args)
-    if num_failures:
-      jobset.message('FAILED', 'Some tests failed', do_newline=True)
-      total_num_failures += num_failures
-    else:
-      jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+        if not job_spec_list:
+            jobset.message(
+                'FAILED', 'No test cases were found.', do_newline=True)
+            return 1
 
-    report_utils.append_junit_xml_results(
-        _xml_report_tree,
-        resultset,
-        'grpc_interop_matrix',
-        suite_name,
-        str(uuid.uuid4()))
+        num_failures, resultset = jobset.run(
+            job_spec_list,
+            newline_on_success=True,
+            add_env={'docker_image': image},
+            maxjobs=args.jobs)
+        if args.bq_result_table and resultset:
+            upload_test_results.upload_interop_results_to_bq(
+                resultset, args.bq_result_table, args)
+        if num_failures:
+            jobset.message('FAILED', 'Some tests failed', do_newline=True)
+            total_num_failures += num_failures
+        else:
+            jobset.message('SUCCESS', 'All tests passed', do_newline=True)
 
-    if not args.keep:
-      cleanup(image)
-  
-  return total_num_failures
+        report_utils.append_junit_xml_results(_xml_report_tree, resultset,
+                                              'grpc_interop_matrix', suite_name,
+                                              str(uuid.uuid4()))
+
+        if not args.keep:
+            cleanup(image)
+
+    return total_num_failures
 
 
 def cleanup(image):
-  jobset.message('START', 'Cleanup docker image %s' % image, do_newline=True)
-  dockerjob.remove_image(image, skip_nonexistent=True)
+    jobset.message('START', 'Cleanup docker image %s' % image, do_newline=True)
+    dockerjob.remove_image(image, skip_nonexistent=True)
 
 
 languages = args.language if args.language != ['all'] else _LANGUAGES
 total_num_failures = 0
 for lang in languages:
-  docker_images = find_all_images_for_lang(lang)
-  for runtime in sorted(docker_images.keys()):
-    total_num_failures += run_tests_for_lang(lang, runtime, docker_images[runtime])
+    docker_images = find_all_images_for_lang(lang)
+    for runtime in sorted(docker_images.keys()):
+        total_num_failures += run_tests_for_lang(lang, runtime,
+                                                 docker_images[runtime])
 
 report_utils.create_xml_report_file(_xml_report_tree, args.report_file)
 
 if total_num_failures:
-  sys.exit(1)
+    sys.exit(1)
 sys.exit(0)
diff --git a/tools/line_count/collect-history.py b/tools/line_count/collect-history.py
index 3f030fb..d2d5c95 100755
--- a/tools/line_count/collect-history.py
+++ b/tools/line_count/collect-history.py
@@ -19,20 +19,23 @@
 # this script is only of historical interest: it's the script that was used to
 # bootstrap the dataset
 
+
 def daterange(start, end):
-  for n in range(int((end - start).days)):
-    yield start + datetime.timedelta(n)
+    for n in range(int((end - start).days)):
+        yield start + datetime.timedelta(n)
+
 
 start_date = datetime.date(2017, 3, 26)
 end_date = datetime.date(2017, 3, 29)
 
 for dt in daterange(start_date, end_date):
-  dmy = dt.strftime('%Y-%m-%d')
-  sha1 = subprocess.check_output(['git', 'rev-list', '-n', '1',
-                                  '--before=%s' % dmy,
-                                  'master']).strip()
-  subprocess.check_call(['git', 'checkout', sha1])
-  subprocess.check_call(['git', 'submodule', 'update'])
-  subprocess.check_call(['git', 'clean', '-f', '-x', '-d'])
-  subprocess.check_call(['cloc', '--vcs=git', '--by-file', '--yaml', '--out=../count/%s.yaml' % dmy, '.'])
-
+    dmy = dt.strftime('%Y-%m-%d')
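+    # Find the last commit on master before this date, then check it out.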
+    sha1 = subprocess.check_output(
+        ['git', 'rev-list', '-n', '1', '--before=%s' % dmy, 'master']).strip()
+    subprocess.check_call(['git', 'checkout', sha1])
+    subprocess.check_call(['git', 'submodule', 'update'])
+    subprocess.check_call(['git', 'clean', '-f', '-x', '-d'])
+    subprocess.check_call([
+        'cloc', '--vcs=git', '--by-file', '--yaml',
+        '--out=../count/%s.yaml' % dmy, '.'
+    ])
diff --git a/tools/line_count/summarize-history.py b/tools/line_count/summarize-history.py
index d2ef7ec..80b0ed7 100755
--- a/tools/line_count/summarize-history.py
+++ b/tools/line_count/summarize-history.py
@@ -13,22 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import subprocess
 import datetime
 
 # this script is only of historical interest: it's the script that was used to
 # bootstrap the dataset
 
+
 def daterange(start, end):
-  for n in range(int((end - start).days)):
-    yield start + datetime.timedelta(n)
+    for n in range(int((end - start).days)):
+        yield start + datetime.timedelta(n)
+
 
 start_date = datetime.date(2017, 3, 26)
 end_date = datetime.date(2017, 3, 29)
 
 for dt in daterange(start_date, end_date):
-  dmy = dt.strftime('%Y-%m-%d')
-  print dmy
-  subprocess.check_call(['tools/line_count/yaml2csv.py', '-i', '../count/%s.yaml' % dmy, '-d', dmy, '-o', '../count/%s.csv' % dmy])
-
+    dmy = dt.strftime('%Y-%m-%d')
+    print dmy
+    subprocess.check_call([
+        'tools/line_count/yaml2csv.py', '-i', '../count/%s.yaml' % dmy, '-d',
+        dmy, '-o', '../count/%s.csv' % dmy
+    ])
diff --git a/tools/line_count/yaml2csv.py b/tools/line_count/yaml2csv.py
index 2a38a12..dd2e92b 100755
--- a/tools/line_count/yaml2csv.py
+++ b/tools/line_count/yaml2csv.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import yaml
 import argparse
 import datetime
@@ -21,18 +20,22 @@
 
 argp = argparse.ArgumentParser(description='Convert cloc yaml to bigquery csv')
 argp.add_argument('-i', '--input', type=str)
-argp.add_argument('-d', '--date', type=str, default=datetime.date.today().strftime('%Y-%m-%d'))
+argp.add_argument(
+    '-d',
+    '--date',
+    type=str,
+    default=datetime.date.today().strftime('%Y-%m-%d'))
 argp.add_argument('-o', '--output', type=str, default='out.csv')
 args = argp.parse_args()
 
 data = yaml.load(open(args.input).read())
 with open(args.output, 'w') as outf:
-  writer = csv.DictWriter(outf, ['date', 'name', 'language', 'code', 'comment', 'blank'])
-  for key, value in data.iteritems():
-    if key == 'header': continue
-    if key == 'SUM': continue
-    if key.startswith('third_party/'): continue
-    row = {'name': key, 'date': args.date}
-    row.update(value)
-    writer.writerow(row)
-
+    writer = csv.DictWriter(
+        outf, ['date', 'name', 'language', 'code', 'comment', 'blank'])
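+    # Skip cloc's 'header' and 'SUM' entries as well as third_party code.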
+    for key, value in data.iteritems():
+        if key == 'header': continue
+        if key == 'SUM': continue
+        if key.startswith('third_party/'): continue
+        row = {'name': key, 'date': args.date}
+        row.update(value)
+        writer.writerow(row)
diff --git a/tools/mkowners/mkowners.py b/tools/mkowners/mkowners.py
index e0ad998..d8b3d3c 100755
--- a/tools/mkowners/mkowners.py
+++ b/tools/mkowners/mkowners.py
@@ -24,10 +24,8 @@
 # Find the root of the git tree
 #
 
-git_root = (subprocess
-            .check_output(['git', 'rev-parse', '--show-toplevel'])
-            .decode('utf-8')
-            .strip())
+git_root = (subprocess.check_output(['git', 'rev-parse', '--show-toplevel'])
+            .decode('utf-8').strip())
 
 #
 # Parse command line arguments
@@ -36,19 +34,22 @@
 default_out = os.path.join(git_root, '.github', 'CODEOWNERS')
 
 argp = argparse.ArgumentParser('Generate .github/CODEOWNERS file')
-argp.add_argument('--out', '-o',
-                  type=str,
-                  default=default_out,
-                  help='Output file (default %s)' % default_out)
+argp.add_argument(
+    '--out',
+    '-o',
+    type=str,
+    default=default_out,
+    help='Output file (default %s)' % default_out)
 args = argp.parse_args()
 
 #
 # Walk git tree to locate all OWNERS files
 #
 
-owners_files = [os.path.join(root, 'OWNERS')
-                for root, dirs, files in os.walk(git_root)
-                if 'OWNERS' in files]
+owners_files = [
+    os.path.join(root, 'OWNERS') for root, dirs, files in os.walk(git_root)
+    if 'OWNERS' in files
+]
 
 #
 # Parse owners files
@@ -57,39 +58,40 @@
 Owners = collections.namedtuple('Owners', 'parent directives dir')
 Directive = collections.namedtuple('Directive', 'who globs')
 
-def parse_owners(filename):
-  with open(filename) as f:
-    src = f.read().splitlines()
-  parent = True
-  directives = []
-  for line in src:
-    line = line.strip()
-    # line := directive | comment
-    if not line: continue
-    if line[0] == '#': continue
-    # it's a directive
-    directive = None
-    if line == 'set noparent':
-      parent = False
-    elif line == '*':
-      directive = Directive(who='*', globs=[])
-    elif ' ' in line:
-      (who, globs) = line.split(' ', 1)
-      globs_list = [glob
-                    for glob in globs.split(' ')
-                    if glob]
-      directive = Directive(who=who, globs=globs_list)
-    else:
-      directive = Directive(who=line, globs=[])
-    if directive:
-      directives.append(directive)
-  return Owners(parent=parent,
-                directives=directives,
-                dir=os.path.relpath(os.path.dirname(filename), git_root))
 
-owners_data = sorted([parse_owners(filename)
-                      for filename in owners_files],
-                     key=operator.attrgetter('dir'))
+def parse_owners(filename):
+    with open(filename) as f:
+        src = f.read().splitlines()
+    parent = True
+    directives = []
+    for line in src:
+        line = line.strip()
+        # line := directive | comment
+        if not line: continue
+        if line[0] == '#': continue
+        # it's a directive
+        directive = None
+        if line == 'set noparent':
+            parent = False
+        elif line == '*':
+            directive = Directive(who='*', globs=[])
+        elif ' ' in line:
+            (who, globs) = line.split(' ', 1)
+            globs_list = [glob for glob in globs.split(' ') if glob]
+            directive = Directive(who=who, globs=globs_list)
+        else:
+            directive = Directive(who=line, globs=[])
+        if directive:
+            directives.append(directive)
+    return Owners(
+        parent=parent,
+        directives=directives,
+        dir=os.path.relpath(os.path.dirname(filename), git_root))
+
+
+owners_data = sorted(
+    [parse_owners(filename) for filename in owners_files],
+    key=operator.attrgetter('dir'))
 
 #
 # Modify owners so that parented OWNERS files point to the actual
@@ -98,24 +100,24 @@
 
 new_owners_data = []
 for owners in owners_data:
-  if owners.parent == True:
-    best_parent = None
-    best_parent_score = None
-    for possible_parent in owners_data:
-      if possible_parent is owners: continue
-      rel = os.path.relpath(owners.dir, possible_parent.dir)
-      # '..' ==> we had to walk up from possible_parent to get to owners
-      #      ==> not a parent
-      if '..' in rel: continue
-      depth = len(rel.split(os.sep))
-      if not best_parent or depth < best_parent_score:
-        best_parent = possible_parent
-        best_parent_score = depth
-    if best_parent:
-      owners = owners._replace(parent = best_parent.dir)
-    else:
-      owners = owners._replace(parent = None)
-  new_owners_data.append(owners)
+    if owners.parent == True:
+        best_parent = None
+        best_parent_score = None
+        for possible_parent in owners_data:
+            if possible_parent is owners: continue
+            rel = os.path.relpath(owners.dir, possible_parent.dir)
+            # '..' ==> we had to walk up from possible_parent to get to owners
+            #      ==> not a parent
+            if '..' in rel: continue
+            depth = len(rel.split(os.sep))
+            if not best_parent or depth < best_parent_score:
+                best_parent = possible_parent
+                best_parent_score = depth
+        if best_parent:
+            owners = owners._replace(parent=best_parent.dir)
+        else:
+            owners = owners._replace(parent=None)
+    new_owners_data.append(owners)
 owners_data = new_owners_data
 
 #
@@ -123,106 +125,114 @@
 # a CODEOWNERS file for GitHub
 #
 
+
 def full_dir(rules_dir, sub_path):
-  return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
+    return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
+
 
 # glob using git
 gg_cache = {}
+
+
 def git_glob(glob):
-  global gg_cache
-  if glob in gg_cache: return gg_cache[glob]
-  r = set(subprocess
-      .check_output(['git', 'ls-files', os.path.join(git_root, glob)])
-      .decode('utf-8')
-      .strip()
-      .splitlines())
-  gg_cache[glob] = r
-  return r
+    global gg_cache
+    if glob in gg_cache: return gg_cache[glob]
+    r = set(
+        subprocess.check_output(
+            ['git', 'ls-files', os.path.join(git_root, glob)]).decode('utf-8')
+        .strip().splitlines())
+    gg_cache[glob] = r
+    return r
+
 
 def expand_directives(root, directives):
-  globs = collections.OrderedDict()
-  # build a table of glob --> owners
-  for directive in directives:
-    for glob in directive.globs or ['**']:
-      if glob not in globs:
-        globs[glob] = []
-      if directive.who not in globs[glob]:
-        globs[glob].append(directive.who)
-  # expand owners for intersecting globs
-  sorted_globs = sorted(globs.keys(),
-                        key=lambda g: len(git_glob(full_dir(root, g))),
-                        reverse=True)
-  out_globs = collections.OrderedDict()
-  for glob_add in sorted_globs:
-    who_add = globs[glob_add]
-    pre_items = [i for i in out_globs.items()]
-    out_globs[glob_add] = who_add.copy()
-    for glob_have, who_have in pre_items:
-      files_add = git_glob(full_dir(root, glob_add))
-      files_have = git_glob(full_dir(root, glob_have))
-      intersect = files_have.intersection(files_add)
-      if intersect:
-        for f in sorted(files_add): # sorted to ensure merge stability
-          if f not in intersect:
-            out_globs[os.path.relpath(f, start=root)] = who_add
-        for who in who_have:
-          if who not in out_globs[glob_add]:
-            out_globs[glob_add].append(who)
-  return out_globs
+    globs = collections.OrderedDict()
+    # build a table of glob --> owners
+    for directive in directives:
+        for glob in directive.globs or ['**']:
+            if glob not in globs:
+                globs[glob] = []
+            if directive.who not in globs[glob]:
+                globs[glob].append(directive.who)
+    # expand owners for intersecting globs
+    sorted_globs = sorted(
+        globs.keys(),
+        key=lambda g: len(git_glob(full_dir(root, g))),
+        reverse=True)
+    out_globs = collections.OrderedDict()
+    for glob_add in sorted_globs:
+        who_add = globs[glob_add]
+        pre_items = [i for i in out_globs.items()]
+        out_globs[glob_add] = who_add.copy()
+        for glob_have, who_have in pre_items:
+            files_add = git_glob(full_dir(root, glob_add))
+            files_have = git_glob(full_dir(root, glob_have))
+            intersect = files_have.intersection(files_add)
+            if intersect:
+                for f in sorted(files_add):  # sorted to ensure merge stability
+                    if f not in intersect:
+                        out_globs[os.path.relpath(f, start=root)] = who_add
+                for who in who_have:
+                    if who not in out_globs[glob_add]:
+                        out_globs[glob_add].append(who)
+    return out_globs
+
 
 def add_parent_to_globs(parent, globs, globs_dir):
-  if not parent: return
-  for owners in owners_data:
-    if owners.dir == parent:
-      owners_globs = expand_directives(owners.dir, owners.directives)
-      for oglob, oglob_who in owners_globs.items():
-        for gglob, gglob_who in globs.items():
-          files_parent = git_glob(full_dir(owners.dir, oglob))
-          files_child = git_glob(full_dir(globs_dir, gglob))
-          intersect = files_parent.intersection(files_child)
-          gglob_who_orig = gglob_who.copy()
-          if intersect:
-            for f in sorted(files_child): # sorted to ensure merge stability
-              if f not in intersect:
-                who = gglob_who_orig.copy()
-                globs[os.path.relpath(f, start=globs_dir)] = who
-            for who in oglob_who:
-              if who not in gglob_who:
-                gglob_who.append(who)
-      add_parent_to_globs(owners.parent, globs, globs_dir)
-      return
-  assert(False)
+    if not parent: return
+    for owners in owners_data:
+        if owners.dir == parent:
+            owners_globs = expand_directives(owners.dir, owners.directives)
+            for oglob, oglob_who in owners_globs.items():
+                for gglob, gglob_who in globs.items():
+                    files_parent = git_glob(full_dir(owners.dir, oglob))
+                    files_child = git_glob(full_dir(globs_dir, gglob))
+                    intersect = files_parent.intersection(files_child)
+                    gglob_who_orig = gglob_who.copy()
+                    if intersect:
+                        for f in sorted(files_child
+                                       ):  # sorted to ensure merge stability
+                            if f not in intersect:
+                                who = gglob_who_orig.copy()
+                                globs[os.path.relpath(f, start=globs_dir)] = who
+                        for who in oglob_who:
+                            if who not in gglob_who:
+                                gglob_who.append(who)
+            add_parent_to_globs(owners.parent, globs, globs_dir)
+            return
+    assert (False)
+
 
 todo = owners_data.copy()
 done = set()
 with open(args.out, 'w') as out:
-  out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
-  out.write('# Uses OWNERS files in different modules throughout the\n')
-  out.write('# repository as the source of truth for module ownership.\n')
-  written_globs = []
-  while todo:
-    head, *todo = todo
-    if head.parent and not head.parent in done:
-      todo.append(head)
-      continue
-    globs = expand_directives(head.dir, head.directives)
-    add_parent_to_globs(head.parent, globs, head.dir)
-    for glob, owners in globs.items():
-      skip = False
-      for glob1, owners1, dir1 in reversed(written_globs):
-        files = git_glob(full_dir(head.dir, glob))
-        files1 = git_glob(full_dir(dir1, glob1))
-        intersect = files.intersection(files1)
-        if files == intersect:
-          if sorted(owners) == sorted(owners1):
-            skip = True # nothing new in this rule
-            break
-        elif intersect:
-          # continuing would cause a semantic change since some files are
-          # affected differently by this rule and CODEOWNERS is order dependent
-          break
-      if not skip:
-        out.write('/%s %s\n' % (
-            full_dir(head.dir, glob), ' '.join(owners)))
-        written_globs.append((glob, owners, head.dir))
-    done.add(head.dir)
+    out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
+    out.write('# Uses OWNERS files in different modules throughout the\n')
+    out.write('# repository as the source of truth for module ownership.\n')
+    written_globs = []
+    while todo:
+        head, *todo = todo
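+        # Emit an entry only after its parent OWNERS has been processed;
+        # otherwise push it back onto the queue.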
+        if head.parent and not head.parent in done:
+            todo.append(head)
+            continue
+        globs = expand_directives(head.dir, head.directives)
+        add_parent_to_globs(head.parent, globs, head.dir)
+        for glob, owners in globs.items():
+            skip = False
+            for glob1, owners1, dir1 in reversed(written_globs):
+                files = git_glob(full_dir(head.dir, glob))
+                files1 = git_glob(full_dir(dir1, glob1))
+                intersect = files.intersection(files1)
+                if files == intersect:
+                    if sorted(owners) == sorted(owners1):
+                        skip = True  # nothing new in this rule
+                        break
+                elif intersect:
+                    # continuing would cause a semantic change since some files are
+                    # affected differently by this rule and CODEOWNERS is order dependent
+                    break
+            if not skip:
+                out.write('/%s %s\n' % (full_dir(head.dir, glob),
+                                        ' '.join(owners)))
+                written_globs.append((glob, owners, head.dir))
+        done.add(head.dir)
diff --git a/tools/profiling/bloat/bloat_diff.py b/tools/profiling/bloat/bloat_diff.py
index 9b40685..91611c2 100755
--- a/tools/profiling/bloat/bloat_diff.py
+++ b/tools/profiling/bloat/bloat_diff.py
@@ -23,12 +23,11 @@
 import sys
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
-argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
+argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
 
 argp.add_argument(
     '-d',
@@ -36,64 +35,59 @@
     type=str,
     help='Commit or branch to compare the current one to')
 
-argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count())
+argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
 
 args = argp.parse_args()
 
 LIBS = [
-  'libgrpc.so',
-  'libgrpc++.so',
+    'libgrpc.so',
+    'libgrpc++.so',
 ]
 
+
 def build(where):
-  subprocess.check_call('make -j%d' % args.jobs,
-                        shell=True, cwd='.')
-  shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True)
-  os.rename('libs', 'bloat_diff_%s' % where)
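+    # Build the tree and snapshot the resulting libs/ directory as
+    # bloat_diff_<where> for the bloaty comparison below.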
+    subprocess.check_call('make -j%d' % args.jobs, shell=True, cwd='.')
+    shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True)
+    os.rename('libs', 'bloat_diff_%s' % where)
+
 
 build('new')
 
 if args.diff_base:
     old = 'old'
     where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
     subprocess.check_call(['git', 'checkout', args.diff_base])
     subprocess.check_call(['git', 'submodule', 'update'])
     try:
-      try:
-        build('old')
-      except subprocess.CalledProcessError, e:
-        subprocess.check_call(['make', 'clean'])
-        build('old')
+        try:
+            build('old')
+        except subprocess.CalledProcessError, e:
+            subprocess.check_call(['make', 'clean'])
+            build('old')
     finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+        subprocess.check_call(['git', 'checkout', where_am_i])
+        subprocess.check_call(['git', 'submodule', 'update'])
 
-subprocess.check_call('make -j%d' % args.jobs,
-                      shell=True, cwd='third_party/bloaty')
+subprocess.check_call(
+    'make -j%d' % args.jobs, shell=True, cwd='third_party/bloaty')
 
 text = ''
 for lib in LIBS:
-  text += '****************************************************************\n\n'
-  text += lib + '\n\n'
-  old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
-  new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
-  assert len(new_version) == 1
-  cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
-  if old_version:
-    assert len(old_version) == 1
-    text += subprocess.check_output('%s %s -- %s' %
-                                    (cmd, new_version[0], old_version[0]),
-                                    shell=True)
-  else:
-    text += subprocess.check_output('%s %s' %
-                                    (cmd, new_version[0]),
-                                    shell=True)
-  text += '\n\n'
+    text += '****************************************************************\n\n'
+    text += lib + '\n\n'
+    old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
+    new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
+    assert len(new_version) == 1
+    cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
+    if old_version:
+        assert len(old_version) == 1
+        text += subprocess.check_output(
+            '%s %s -- %s' % (cmd, new_version[0], old_version[0]), shell=True)
+    else:
+        text += subprocess.check_output(
+            '%s %s' % (cmd, new_version[0]), shell=True)
+    text += '\n\n'
 
 print text
 comment_on_pr.comment_on_pr('```\n%s\n```' % text)
diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py
index 8a19afb..e3d3357 100755
--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -23,7 +23,6 @@
 import tabulate
 import time
 
-
 SELF_TIME = object()
 TIME_FROM_SCOPE_START = object()
 TIME_TO_SCOPE_END = object()
@@ -31,124 +30,129 @@
 TIME_TO_STACK_END = object()
 TIME_FROM_LAST_IMPORTANT = object()
 
-
-argp = argparse.ArgumentParser(description='Process output of basic_prof builds')
+argp = argparse.ArgumentParser(
+    description='Process output of basic_prof builds')
 argp.add_argument('--source', default='latency_trace.txt', type=str)
 argp.add_argument('--fmt', choices=tabulate.tabulate_formats, default='simple')
 argp.add_argument('--out', default='-', type=str)
 args = argp.parse_args()
 
+
 class LineItem(object):
 
-  def __init__(self, line, indent):
-    self.tag = line['tag']
-    self.indent = indent
-    self.start_time = line['t']
-    self.end_time = None
-    self.important = line['imp']
-    self.filename = line['file']
-    self.fileline = line['line']
-    self.times = {}
+    def __init__(self, line, indent):
+        self.tag = line['tag']
+        self.indent = indent
+        self.start_time = line['t']
+        self.end_time = None
+        self.important = line['imp']
+        self.filename = line['file']
+        self.fileline = line['line']
+        self.times = {}
 
 
 class ScopeBuilder(object):
 
-  def __init__(self, call_stack_builder, line):
-    self.call_stack_builder = call_stack_builder
-    self.indent = len(call_stack_builder.stk)
-    self.top_line = LineItem(line, self.indent)
-    call_stack_builder.lines.append(self.top_line)
-    self.first_child_pos = len(call_stack_builder.lines)
+    def __init__(self, call_stack_builder, line):
+        self.call_stack_builder = call_stack_builder
+        self.indent = len(call_stack_builder.stk)
+        self.top_line = LineItem(line, self.indent)
+        call_stack_builder.lines.append(self.top_line)
+        self.first_child_pos = len(call_stack_builder.lines)
 
-  def mark(self, line):
-    line_item = LineItem(line, self.indent + 1)
-    line_item.end_time = line_item.start_time
-    self.call_stack_builder.lines.append(line_item)
+    def mark(self, line):
+        line_item = LineItem(line, self.indent + 1)
+        line_item.end_time = line_item.start_time
+        self.call_stack_builder.lines.append(line_item)
 
-  def finish(self, line):
-    assert line['tag'] == self.top_line.tag, (
-        'expected %s, got %s; thread=%s; t0=%f t1=%f' %
-        (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time,
-         line['t']))
-    final_time_stamp = line['t']
-    assert self.top_line.end_time is None
-    self.top_line.end_time = final_time_stamp
-    self.top_line.important = self.top_line.important or line['imp']
-    assert SELF_TIME not in self.top_line.times
-    self.top_line.times[SELF_TIME] = final_time_stamp - self.top_line.start_time
-    for line in self.call_stack_builder.lines[self.first_child_pos:]:
-      if TIME_FROM_SCOPE_START not in line.times:
-        line.times[TIME_FROM_SCOPE_START] = line.start_time - self.top_line.start_time
-        line.times[TIME_TO_SCOPE_END] = final_time_stamp - line.end_time
+    def finish(self, line):
+        assert line['tag'] == self.top_line.tag, (
+            'expected %s, got %s; thread=%s; t0=%f t1=%f' %
+            (self.top_line.tag, line['tag'], line['thd'],
+             self.top_line.start_time, line['t']))
+        final_time_stamp = line['t']
+        assert self.top_line.end_time is None
+        self.top_line.end_time = final_time_stamp
+        self.top_line.important = self.top_line.important or line['imp']
+        assert SELF_TIME not in self.top_line.times
+        self.top_line.times[
+            SELF_TIME] = final_time_stamp - self.top_line.start_time
+        for line in self.call_stack_builder.lines[self.first_child_pos:]:
+            if TIME_FROM_SCOPE_START not in line.times:
+                line.times[
+                    TIME_FROM_SCOPE_START] = line.start_time - self.top_line.start_time
+                line.times[TIME_TO_SCOPE_END] = final_time_stamp - line.end_time
 
 
 class CallStackBuilder(object):
 
-  def __init__(self):
-    self.stk = []
-    self.signature = hashlib.md5()
-    self.lines = []
+    def __init__(self):
+        self.stk = []
+        self.signature = hashlib.md5()
+        self.lines = []
 
-  def finish(self):
-    start_time = self.lines[0].start_time
-    end_time = self.lines[0].end_time
-    self.signature = self.signature.hexdigest()
-    last_important = start_time
-    for line in self.lines:
-      line.times[TIME_FROM_STACK_START] = line.start_time - start_time
-      line.times[TIME_TO_STACK_END] = end_time - line.end_time
-      line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
-      if line.important:
-        last_important = line.end_time
-    last_important = end_time
+    def finish(self):
+        start_time = self.lines[0].start_time
+        end_time = self.lines[0].end_time
+        self.signature = self.signature.hexdigest()
+        last_important = start_time
+        for line in self.lines:
+            line.times[TIME_FROM_STACK_START] = line.start_time - start_time
+            line.times[TIME_TO_STACK_END] = end_time - line.end_time
+            line.times[
+                TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
+            if line.important:
+                last_important = line.end_time
+        last_important = end_time
 
-  def add(self, line):
-    line_type = line['type']
-    self.signature.update(line_type)
-    self.signature.update(line['tag'])
-    if line_type == '{':
-      self.stk.append(ScopeBuilder(self, line))
-      return False
-    elif line_type == '}':
-      assert self.stk, (
-          'expected non-empty stack for closing %s; thread=%s; t=%f' %
-          (line['tag'], line['thd'], line['t']))
-      self.stk.pop().finish(line)
-      if not self.stk:
-        self.finish()
-        return True
-      return False
-    elif line_type == '.' or line_type == '!':
-      self.stk[-1].mark(line)
-      return False
-    else:
-      raise Exception('Unknown line type: \'%s\'' % line_type)
+    def add(self, line):
+        line_type = line['type']
+        self.signature.update(line_type)
+        self.signature.update(line['tag'])
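+        # '{' opens a scope, '}' closes it, and '.'/'!' mark points inside the
+        # current scope; returns True once a full call stack has completed.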
+        if line_type == '{':
+            self.stk.append(ScopeBuilder(self, line))
+            return False
+        elif line_type == '}':
+            assert self.stk, (
+                'expected non-empty stack for closing %s; thread=%s; t=%f' %
+                (line['tag'], line['thd'], line['t']))
+            self.stk.pop().finish(line)
+            if not self.stk:
+                self.finish()
+                return True
+            return False
+        elif line_type == '.' or line_type == '!':
+            self.stk[-1].mark(line)
+            return False
+        else:
+            raise Exception('Unknown line type: \'%s\'' % line_type)
 
 
 class CallStack(object):
 
-  def __init__(self, initial_call_stack_builder):
-    self.count = 1
-    self.signature = initial_call_stack_builder.signature
-    self.lines = initial_call_stack_builder.lines
-    for line in self.lines:
-      for key, val in line.times.items():
-        line.times[key] = [val]
+    def __init__(self, initial_call_stack_builder):
+        self.count = 1
+        self.signature = initial_call_stack_builder.signature
+        self.lines = initial_call_stack_builder.lines
+        for line in self.lines:
+            for key, val in line.times.items():
+                line.times[key] = [val]
 
-  def add(self, call_stack_builder):
-    assert self.signature == call_stack_builder.signature
-    self.count += 1
-    assert len(self.lines) == len(call_stack_builder.lines)
-    for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
-      assert lsum.tag == line.tag
-      assert lsum.times.keys() == line.times.keys()
-      for k, lst in lsum.times.iteritems():
-        lst.append(line.times[k])
+    def add(self, call_stack_builder):
+        assert self.signature == call_stack_builder.signature
+        self.count += 1
+        assert len(self.lines) == len(call_stack_builder.lines)
+        for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
+            assert lsum.tag == line.tag
+            assert lsum.times.keys() == line.times.keys()
+            for k, lst in lsum.times.iteritems():
+                lst.append(line.times[k])
 
-  def finish(self):
-    for line in self.lines:
-      for lst in line.times.itervalues():
-        lst.sort()
+    def finish(self):
+        for line in self.lines:
+            for lst in line.times.itervalues():
+                lst.sort()
+
 
 builder = collections.defaultdict(CallStackBuilder)
 call_stacks = collections.defaultdict(CallStack)
@@ -156,26 +160,28 @@
 lines = 0
 start = time.time()
 with open(args.source) as f:
-  for line in f:
-    lines += 1
-    inf = json.loads(line)
-    thd = inf['thd']
-    cs = builder[thd]
-    if cs.add(inf):
-      if cs.signature in call_stacks:
-        call_stacks[cs.signature].add(cs)
-      else:
-        call_stacks[cs.signature] = CallStack(cs)
-      del builder[thd]
+    for line in f:
+        lines += 1
+        inf = json.loads(line)
+        thd = inf['thd']
+        cs = builder[thd]
+        if cs.add(inf):
+            if cs.signature in call_stacks:
+                call_stacks[cs.signature].add(cs)
+            else:
+                call_stacks[cs.signature] = CallStack(cs)
+            del builder[thd]
 time_taken = time.time() - start
 
-call_stacks = sorted(call_stacks.values(), key=lambda cs: cs.count, reverse=True)
+call_stacks = sorted(
+    call_stacks.values(), key=lambda cs: cs.count, reverse=True)
 total_stacks = 0
 for cs in call_stacks:
-  total_stacks += cs.count
-  cs.finish()
+    total_stacks += cs.count
+    cs.finish()
 
-def percentile(N, percent, key=lambda x:x):
+
+def percentile(N, percent, key=lambda x: x):
     """
     Find the percentile of a list of values.
 
@@ -187,80 +193,83 @@
     """
     if not N:
         return None
-    k = (len(N)-1) * percent
+    k = (len(N) - 1) * percent
     f = math.floor(k)
     c = math.ceil(k)
     if f == c:
         return key(N[int(k)])
-    d0 = key(N[int(f)]) * (c-k)
-    d1 = key(N[int(c)]) * (k-f)
-    return d0+d1
+    d0 = key(N[int(f)]) * (c - k)
+    d1 = key(N[int(c)]) * (k - f)
+    return d0 + d1
+
 
 def tidy_tag(tag):
-  if tag[0:10] == 'GRPC_PTAG_':
-    return tag[10:]
-  return tag
+    if tag[0:10] == 'GRPC_PTAG_':
+        return tag[10:]
+    return tag
+
 
 def time_string(values):
-  num_values = len(values)
-  return '%.1f/%.1f/%.1f' % (
-      1e6 * percentile(values, 0.5),
-      1e6 * percentile(values, 0.9),
-      1e6 * percentile(values, 0.99))
+    num_values = len(values)
+    return '%.1f/%.1f/%.1f' % (1e6 * percentile(values, 0.5),
+                               1e6 * percentile(values, 0.9),
+                               1e6 * percentile(values, 0.99))
+
 
 def time_format(idx):
-  def ent(line, idx=idx):
-    if idx in line.times:
-      return time_string(line.times[idx])
-    return ''
-  return ent
 
-BANNER = {
-    'simple': 'Count: %(count)d',
-    'html': '<h1>Count: %(count)d</h1>'
-}
+    def ent(line, idx=idx):
+        if idx in line.times:
+            return time_string(line.times[idx])
+        return ''
+
+    return ent
+
+
+BANNER = {'simple': 'Count: %(count)d', 'html': '<h1>Count: %(count)d</h1>'}
 
 FORMAT = [
-  ('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)),
-  ('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)),
-  ('IMP', lambda line: '*' if line.important else ''),
-  ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
-  ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
-  ('SELF', time_format(SELF_TIME)),
-  ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
-  ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
-  ('SELF', time_format(SELF_TIME)),
-  ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
+    ('TAG', lambda line: '..' * line.indent + tidy_tag(line.tag)),
+    ('LOC',
+     lambda line: '%s:%d' % (line.filename[line.filename.rfind('/') + 1:], line.fileline)
+    ),
+    ('IMP', lambda line: '*' if line.important else ''),
+    ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
+    ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
+    ('SELF', time_format(SELF_TIME)),
+    ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
+    ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
+    ('SELF', time_format(SELF_TIME)),
+    ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
 ]
 
 out = sys.stdout
 if args.out != '-':
-  out = open(args.out, 'w')
+    out = open(args.out, 'w')
 
 if args.fmt == 'html':
-  print >>out, '<html>'
-  print >>out, '<head>'
-  print >>out, '<title>Profile Report</title>'
-  print >>out, '</head>'
+    print >> out, '<html>'
+    print >> out, '<head>'
+    print >> out, '<title>Profile Report</title>'
+    print >> out, '</head>'
 
 accounted_for = 0
 for cs in call_stacks:
-  if args.fmt in BANNER:
-    print >>out, BANNER[args.fmt] % {
-        'count': cs.count,
-    }
-  header, _ = zip(*FORMAT)
-  table = []
-  for line in cs.lines:
-    fields = []
-    for _, fn in FORMAT:
-      fields.append(fn(line))
-    table.append(fields)
-  print >>out, tabulate.tabulate(table, header, tablefmt=args.fmt)
-  accounted_for += cs.count
-  if accounted_for > .99 * total_stacks:
-    break
+    if args.fmt in BANNER:
+        print >> out, BANNER[args.fmt] % {
+            'count': cs.count,
+        }
+    header, _ = zip(*FORMAT)
+    table = []
+    for line in cs.lines:
+        fields = []
+        for _, fn in FORMAT:
+            fields.append(fn(line))
+        table.append(fields)
+    print >> out, tabulate.tabulate(table, header, tablefmt=args.fmt)
+    accounted_for += cs.count
+    if accounted_for > .99 * total_stacks:
+        break
 
 if args.fmt == 'html':
-  print '</html>'
-
+    print >> out, '</html>'
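For context, a minimal sketch (not part of the patch, sample values are made up) of the interpolation the percentile() helper above performs when the requested rank falls between two samples:

    import math

    samples = sorted([1.0, 2.0, 3.0, 4.0])  # hypothetical latency samples
    percent = 0.9
    k = (len(samples) - 1) * percent         # 2.7
    f, c = math.floor(k), math.ceil(k)       # 2.0 and 3.0
    # weight the two neighbouring samples by their distance from k
    d0 = samples[int(f)] * (c - k)           # 3.0 * 0.3 = 0.9
    d1 = samples[int(c)] * (k - f)           # 4.0 * 0.7 = 2.8
    print d0 + d1                            # 3.7, the interpolated p90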
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 9f9b672..746b643 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -28,37 +28,38 @@
 columns = []
 
 for row in json.loads(
-    subprocess.check_output([
-      'bq','--format=json','show','microbenchmarks.microbenchmarks']))['schema']['fields']:
-  columns.append((row['name'], row['type'].lower()))
+        subprocess.check_output([
+            'bq', '--format=json', 'show', 'microbenchmarks.microbenchmarks'
+        ]))['schema']['fields']:
+    columns.append((row['name'], row['type'].lower()))
 
 SANITIZE = {
-  'integer': int,
-  'float': float,
-  'boolean': bool,
-  'string': str,
-  'timestamp': str,
+    'integer': int,
+    'float': float,
+    'boolean': bool,
+    'string': str,
+    'timestamp': str,
 }
 
 if sys.argv[1] == '--schema':
-  print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
-  sys.exit(0)
+    print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
+    sys.exit(0)
 
 with open(sys.argv[1]) as f:
-  js = json.loads(f.read())
+    js = json.loads(f.read())
 
 if len(sys.argv) > 2:
-  with open(sys.argv[2]) as f:
-    js2 = json.loads(f.read())
+    with open(sys.argv[2]) as f:
+        js2 = json.loads(f.read())
 else:
-  js2 = None
+    js2 = None
 
-writer = csv.DictWriter(sys.stdout, [c for c,t in columns])
+writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
 
 for row in bm_json.expand_json(js, js2):
-  sane_row = {}
-  for name, sql_type in columns:
-    if name in row:
-      if row[name] == '': continue
-      sane_row[name] = SANITIZE[sql_type](row[name])
-  writer.writerow(sane_row)
+    sane_row = {}
+    for name, sql_type in columns:
+        if name in row:
+            if row[name] == '': continue
+            sane_row[name] = SANITIZE[sql_type](row[name])
+    writer.writerow(sane_row)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_build.py b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
index ce62c09..a4cd617 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_build.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Python utility to build opt and counters benchmarks """
 
 import bm_constants
@@ -26,55 +25,55 @@
 
 
 def _args():
-  argp = argparse.ArgumentParser(description='Builds microbenchmarks')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to build')
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='How many CPUs to dedicate to this task')
-  argp.add_argument(
-    '-n',
-    '--name',
-    type=str,
-    help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
-  )
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.name
-  return args
+    argp = argparse.ArgumentParser(description='Builds microbenchmarks')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to build')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='How many CPUs to dedicate to this task')
+    argp.add_argument(
+        '-n',
+        '--name',
+        type=str,
+        help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
+    )
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.name
+    return args
 
 
 def _make_cmd(cfg, benchmarks, jobs):
-  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
+    return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
 
 
 def build(name, benchmarks, jobs, counters):
-  shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
-  subprocess.check_call(['git', 'submodule', 'update'])
-  try:
-    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
-    if counters:
-      subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
-  except subprocess.CalledProcessError, e:
-    subprocess.check_call(['make', 'clean'])
-    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
-    if counters:
-      subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
-  os.rename(
-    'bins',
-    'bm_diff_%s' % name,)
+    shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
+    subprocess.check_call(['git', 'submodule', 'update'])
+    try:
+        subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+        if counters:
+            subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+    except subprocess.CalledProcessError, e:
+        subprocess.check_call(['make', 'clean'])
+        subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+        if counters:
+            subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+    os.rename(
+        'bins',
+        'bm_diff_%s' % name,)
 
 
 if __name__ == '__main__':
-  args = _args()
-  build(args.name, args.benchmarks, args.jobs, args.counters)
+    args = _args()
+    build(args.name, args.benchmarks, args.jobs, args.counters)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 0ec17fa..cff29db 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -13,19 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Configurable constants for the bm_*.py family """
 
 _AVAILABLE_BENCHMARK_TESTS = [
-  'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
-  'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
-  'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
-  'bm_metadata', 'bm_fullstack_trickle'
+    'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
+    'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
+    'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
+    'bm_metadata', 'bm_fullstack_trickle'
 ]
 
-_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
-        'allocs_per_iteration', 'writes_per_iteration',
-        'atm_cas_per_iteration', 'atm_add_per_iteration',
-        'nows_per_iteration', 'cli_transport_stalls_per_iteration', 
-        'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
-        'svr_stream_stalls_per_iteration', 'http2_pings_sent_per_iteration')
+_INTERESTING = (
+    'cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
+    'allocs_per_iteration', 'writes_per_iteration', 'atm_cas_per_iteration',
+    'atm_add_per_iteration', 'nows_per_iteration',
+    'cli_transport_stalls_per_iteration', 'cli_stream_stalls_per_iteration',
+    'svr_transport_stalls_per_iteration', 'svr_stream_stalls_per_iteration',
+    'http2_pings_sent_per_iteration')
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index a41d0f0..b8a3b22 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -34,190 +34,195 @@
 
 
 def _median(ary):
-  assert (len(ary))
-  ary = sorted(ary)
-  n = len(ary)
-  if n % 2 == 0:
-    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
-  else:
-    return ary[n / 2]
+    assert (len(ary))
+    ary = sorted(ary)
+    n = len(ary)
+    if n % 2 == 0:
+        return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+    else:
+        return ary[n / 2]
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
-  argp.add_argument(
-    '-t',
-    '--track',
-    choices=sorted(bm_constants._INTERESTING),
-    nargs='+',
-    default=sorted(bm_constants._INTERESTING),
-    help='Which metrics to track')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=20,
-    help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
-  )
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  argp.add_argument('-n', '--new', type=str, help='New benchmark name')
-  argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
-  argp.add_argument(
-    '-v', '--verbose', type=bool, help='Print details of before/after')
-  args = argp.parse_args()
-  global verbose
-  if args.verbose: verbose = True
-  assert args.new
-  assert args.old
-  return args
+    argp = argparse.ArgumentParser(
+        description='Perform diff on microbenchmarks')
+    argp.add_argument(
+        '-t',
+        '--track',
+        choices=sorted(bm_constants._INTERESTING),
+        nargs='+',
+        default=sorted(bm_constants._INTERESTING),
+        help='Which metrics to track')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=20,
+        help='Number of times to loop the benchmarks. Must match what was passed to bm_run.py'
+    )
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    argp.add_argument('-n', '--new', type=str, help='New benchmark name')
+    argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
+    argp.add_argument(
+        '-v', '--verbose', type=bool, help='Print details of before/after')
+    args = argp.parse_args()
+    global verbose
+    if args.verbose: verbose = True
+    assert args.new
+    assert args.old
+    return args
 
 
 def _maybe_print(str):
-  if verbose: print str
+    if verbose: print str
 
 
 class Benchmark:
 
-  def __init__(self):
-    self.samples = {
-      True: collections.defaultdict(list),
-      False: collections.defaultdict(list)
-    }
-    self.final = {}
+    def __init__(self):
+        self.samples = {
+            True: collections.defaultdict(list),
+            False: collections.defaultdict(list)
+        }
+        self.final = {}
 
-  def add_sample(self, track, data, new):
-    for f in track:
-      if f in data:
-        self.samples[new][f].append(float(data[f]))
+    def add_sample(self, track, data, new):
+        for f in track:
+            if f in data:
+                self.samples[new][f].append(float(data[f]))
 
-  def process(self, track, new_name, old_name):
-    for f in sorted(track):
-      new = self.samples[True][f]
-      old = self.samples[False][f]
-      if not new or not old: continue
-      mdn_diff = abs(_median(new) - _median(old))
-      _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
-             (f, new_name, new, old_name, old, mdn_diff))
-      s = bm_speedup.speedup(new, old, 1e-5)
-      if abs(s) > 3:
-        if mdn_diff > 0.5 or 'trickle' in f:
-          self.final[f] = '%+d%%' % s
-    return self.final.keys()
+    def process(self, track, new_name, old_name):
+        for f in sorted(track):
+            new = self.samples[True][f]
+            old = self.samples[False][f]
+            if not new or not old: continue
+            mdn_diff = abs(_median(new) - _median(old))
+            _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
+                         (f, new_name, new, old_name, old, mdn_diff))
+            s = bm_speedup.speedup(new, old, 1e-5)
+            if abs(s) > 3:
+                if mdn_diff > 0.5 or 'trickle' in f:
+                    self.final[f] = '%+d%%' % s
+        return self.final.keys()
 
-  def skip(self):
-    return not self.final
+    def skip(self):
+        return not self.final
 
-  def row(self, flds):
-    return [self.final[f] if f in self.final else '' for f in flds]
+    def row(self, flds):
+        return [self.final[f] if f in self.final else '' for f in flds]
 
 
 def _read_json(filename, badjson_files, nonexistant_files):
-  stripped = ".".join(filename.split(".")[:-2])
-  try:
-    with open(filename) as f:
-      r = f.read();
-      return json.loads(r)
-  except IOError, e:
-    if stripped in nonexistant_files:
-      nonexistant_files[stripped] += 1
-    else:
-      nonexistant_files[stripped] = 1
-    return None
-  except ValueError, e:
-    print r
-    if stripped in badjson_files:
-      badjson_files[stripped] += 1
-    else:
-      badjson_files[stripped] = 1
-    return None
+    stripped = ".".join(filename.split(".")[:-2])
+    try:
+        with open(filename) as f:
+            r = f.read()
+            return json.loads(r)
+    except IOError, e:
+        if stripped in nonexistant_files:
+            nonexistant_files[stripped] += 1
+        else:
+            nonexistant_files[stripped] = 1
+        return None
+    except ValueError, e:
+        print r
+        if stripped in badjson_files:
+            badjson_files[stripped] += 1
+        else:
+            badjson_files[stripped] = 1
+        return None
+
 
 def fmt_dict(d):
-  return ''.join(["    " + k + ": " + str(d[k]) + "\n" for k in d])
+    return ''.join(["    " + k + ": " + str(d[k]) + "\n" for k in d])
+
 
 def diff(bms, loops, regex, track, old, new, counters):
-  benchmarks = collections.defaultdict(Benchmark)
+    benchmarks = collections.defaultdict(Benchmark)
 
-  badjson_files = {}
-  nonexistant_files = {}
-  for bm in bms:
-    for loop in range(0, loops):
-      for line in subprocess.check_output(
-        ['bm_diff_%s/opt/%s' % (old, bm),
-         '--benchmark_list_tests', 
-         '--benchmark_filter=%s' % regex]).splitlines():
-        stripped_line = line.strip().replace("/", "_").replace(
-          "<", "_").replace(">", "_").replace(", ", "_")
-        js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
-                    (bm, stripped_line, new, loop),
-                    badjson_files, nonexistant_files)
-        js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
-                    (bm, stripped_line, old, loop),
-                    badjson_files, nonexistant_files)
-        if counters:
-          js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
-                      (bm, stripped_line, new, loop),
-                      badjson_files, nonexistant_files)
-          js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
-                      (bm, stripped_line, old, loop),
-                      badjson_files, nonexistant_files)
+    badjson_files = {}
+    nonexistant_files = {}
+    for bm in bms:
+        for loop in range(0, loops):
+            for line in subprocess.check_output([
+                    'bm_diff_%s/opt/%s' % (old, bm), '--benchmark_list_tests',
+                    '--benchmark_filter=%s' % regex
+            ]).splitlines():
+                stripped_line = line.strip().replace("/", "_").replace(
+                    "<", "_").replace(">", "_").replace(", ", "_")
+                js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
+                                        (bm, stripped_line, new, loop),
+                                        badjson_files, nonexistant_files)
+                js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
+                                        (bm, stripped_line, old, loop),
+                                        badjson_files, nonexistant_files)
+                if counters:
+                    js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+                                            (bm, stripped_line, new, loop),
+                                            badjson_files, nonexistant_files)
+                    js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+                                            (bm, stripped_line, old, loop),
+                                            badjson_files, nonexistant_files)
+                else:
+                    js_new_ctr = None
+                    js_old_ctr = None
+
+                for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+                    name = row['cpp_name']
+                    if name.endswith('_mean') or name.endswith('_stddev'):
+                        continue
+                    benchmarks[name].add_sample(track, row, True)
+                for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+                    name = row['cpp_name']
+                    if name.endswith('_mean') or name.endswith('_stddev'):
+                        continue
+                    benchmarks[name].add_sample(track, row, False)
+
+    really_interesting = set()
+    for name, bm in benchmarks.items():
+        _maybe_print(name)
+        really_interesting.update(bm.process(track, new, old))
+    fields = [f for f in track if f in really_interesting]
+
+    headers = ['Benchmark'] + fields
+    rows = []
+    for name in sorted(benchmarks.keys()):
+        if benchmarks[name].skip(): continue
+        rows.append([name] + benchmarks[name].row(fields))
+    note = None
+    if len(badjson_files):
+        note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(
+            badjson_files)
+    if len(nonexistant_files):
+        if note:
+            note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+                nonexistant_files)
         else:
-          js_new_ctr = None
-          js_old_ctr = None
-
-        for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, True)
-        for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, False)
-
-  really_interesting = set()
-  for name, bm in benchmarks.items():
-    _maybe_print(name)
-    really_interesting.update(bm.process(track, new, old))
-  fields = [f for f in track if f in really_interesting]
-
-  headers = ['Benchmark'] + fields
-  rows = []
-  for name in sorted(benchmarks.keys()):
-    if benchmarks[name].skip(): continue
-    rows.append([name] + benchmarks[name].row(fields))
-  note = None
-  if len(badjson_files):
-    note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(badjson_files)
-  if len(nonexistant_files):
-    if note:
-      note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
+            note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+                nonexistant_files)
+    if rows:
+        return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
     else:
-      note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
-  if rows:
-    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
-  else:
-    return None, note
+        return None, note
 
 
 if __name__ == '__main__':
-  args = _args()
-  diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old,
-            args.new, args.counters)
-  print('%s\n%s' % (note, diff if diff else "No performance differences"))
+    args = _args()
+    diff, note = diff(args.benchmarks, args.loops, args.regex, args.track,
+                      args.old, args.new, args.counters)
+    print('%s\n%s' % (note, diff if diff else "No performance differences"))
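As a rough illustration (not part of the patch; the benchmark name and build handle below are hypothetical), this is how the scripts above sanitize a benchmark name into the per-run JSON file names that bm_run.py writes and bm_diff.py reads back:

    line = 'BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0'
    stripped_line = line.strip().replace("/", "_").replace(
        "<", "_").replace(">", "_").replace(", ", "_")
    # -> 'BM_UnaryPingPong_TCP_NoOpMutator_NoOpMutator__0_0'
    print '%s.%s.opt.%s.%d.json' % ('bm_fullstack_unary_ping_pong',
                                    stripped_line, 'new', 0)
    # -> 'bm_fullstack_unary_ping_pong.BM_UnaryPingPong_TCP_NoOpMutator_NoOpMutator__0_0.opt.new.0.json'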
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 74b7174..137c22b 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Runs the entire bm_*.py pipeline, and possible comments on the PR """
 
 import bm_constants
@@ -29,129 +28,132 @@
 import subprocess
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
-    'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+        'python_utils'))
 import jobset
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
-  argp.add_argument(
-    '-t',
-    '--track',
-    choices=sorted(bm_constants._INTERESTING),
-    nargs='+',
-    default=sorted(bm_constants._INTERESTING),
-    help='Which metrics to track')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to run')
-  argp.add_argument(
-    '-d',
-    '--diff_base',
-    type=str,
-    help='Commit or branch to compare the current one to')
-  argp.add_argument(
-    '-o',
-    '--old',
-    default='old',
-    type=str,
-    help='Name of baseline run to compare to. Ususally just called "old"')
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=10,
-    help='Number of times to loops the benchmarks. More loops cuts down on noise'
-  )
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  argp.add_argument(
-    '--pr_comment_name',
-    type=str,
-    default="microbenchmarks",
-    help='Name that Jenkins will use to commen on the PR')
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.diff_base or args.old, "One of diff_base or old must be set!"
-  if args.loops < 3:
-    print "WARNING: This run will likely be noisy. Increase loops."
-  return args
+    argp = argparse.ArgumentParser(
+        description='Perform diff on microbenchmarks')
+    argp.add_argument(
+        '-t',
+        '--track',
+        choices=sorted(bm_constants._INTERESTING),
+        nargs='+',
+        default=sorted(bm_constants._INTERESTING),
+        help='Which metrics to track')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to run')
+    argp.add_argument(
+        '-d',
+        '--diff_base',
+        type=str,
+        help='Commit or branch to compare the current one to')
+    argp.add_argument(
+        '-o',
+        '--old',
+        default='old',
+        type=str,
+        help='Name of baseline run to compare to. Usually just called "old"')
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=10,
+        help='Number of times to loop the benchmarks. More loops cut down on noise'
+    )
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    argp.add_argument(
+        '--pr_comment_name',
+        type=str,
+        default="microbenchmarks",
+        help='Name that Jenkins will use to comment on the PR')
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.diff_base or args.old, "One of diff_base or old must be set!"
+    if args.loops < 3:
+        print "WARNING: This run will likely be noisy. Increase loops."
+    return args
 
 
 def eintr_be_gone(fn):
-  """Run fn until it doesn't stop because of EINTR"""
+    """Run fn until it doesn't stop because of EINTR"""
 
-  def inner(*args):
-    while True:
-      try:
-        return fn(*args)
-      except IOError, e:
-        if e.errno != errno.EINTR:
-          raise
+    def inner(*args):
+        while True:
+            try:
+                return fn(*args)
+            except IOError, e:
+                if e.errno != errno.EINTR:
+                    raise
 
-  return inner
+    return inner
 
 
 def main(args):
 
-  bm_build.build('new', args.benchmarks, args.jobs, args.counters)
+    bm_build.build('new', args.benchmarks, args.jobs, args.counters)
 
-  old = args.old
-  if args.diff_base:
-    old = 'old'
-    where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-    subprocess.check_call(['git', 'checkout', args.diff_base])
-    try:
-      bm_build.build(old, args.benchmarks, args.jobs, args.counters)
-    finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+    old = args.old
+    if args.diff_base:
+        old = 'old'
+        where_am_i = subprocess.check_output(
+            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        subprocess.check_call(['git', 'checkout', args.diff_base])
+        try:
+            bm_build.build(old, args.benchmarks, args.jobs, args.counters)
+        finally:
+            subprocess.check_call(['git', 'checkout', where_am_i])
+            subprocess.check_call(['git', 'submodule', 'update'])
 
-  jobs_list = []
-  jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops, args.regex, args.counters)
-  jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops, args.regex, args.counters)
+    jobs_list = []
+    jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops,
+                                    args.regex, args.counters)
+    jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops,
+                                    args.regex, args.counters)
 
-  # shuffle all jobs to eliminate noise from GCE CPU drift
-  random.shuffle(jobs_list, random.SystemRandom().random)
-  jobset.run(jobs_list, maxjobs=args.jobs)
+    # shuffle all jobs to eliminate noise from GCE CPU drift
+    random.shuffle(jobs_list, random.SystemRandom().random)
+    jobset.run(jobs_list, maxjobs=args.jobs)
 
-  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
-                'new', args.counters)
-  if diff:
-    text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
-  else:
-    text = '[%s] No significant performance differences' % args.pr_comment_name
-  if note:
-    text = note + '\n\n' + text
-  print('%s' % text)
-  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+    diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex,
+                              args.track, old, 'new', args.counters)
+    if diff:
+        text = '[%s] Performance differences noted:\n%s' % (
+            args.pr_comment_name, diff)
+    else:
+        text = '[%s] No significant performance differences' % args.pr_comment_name
+    if note:
+        text = note + '\n\n' + text
+    print('%s' % text)
+    comment_on_pr.comment_on_pr('```\n%s\n```' % text)
 
 
 if __name__ == '__main__':
-  args = _args()
-  main(args)
+    args = _args()
+    main(args)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 81db5a2..08894bb 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Python utility to run opt and counters benchmarks and save json output """
 
 import bm_constants
@@ -27,93 +26,96 @@
 import os
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
-    'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+        'python_utils'))
 import jobset
 
 
 def _args():
-  argp = argparse.ArgumentParser(description='Runs microbenchmarks')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Benchmarks to run')
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  argp.add_argument(
-    '-n',
-    '--name',
-    type=str,
-    help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
-  )
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=20,
-    help='Number of times to loops the benchmarks. More loops cuts down on noise'
-  )
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.name
-  if args.loops < 3:
-    print "WARNING: This run will likely be noisy. Increase loops to at least 3."
-  return args
+    argp = argparse.ArgumentParser(description='Runs microbenchmarks')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Benchmarks to run')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    argp.add_argument(
+        '-n',
+        '--name',
+        type=str,
+        help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+    )
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=20,
+        help='Number of times to loop the benchmarks. More loops cut down on noise'
+    )
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.name
+    if args.loops < 3:
+        print "WARNING: This run will likely be noisy. Increase loops to at least 3."
+    return args
 
 
 def _collect_bm_data(bm, cfg, name, regex, idx, loops):
-  jobs_list = []
-  for line in subprocess.check_output(
-    ['bm_diff_%s/%s/%s' % (name, cfg, bm),
-     '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
-    stripped_line = line.strip().replace("/", "_").replace(
-      "<", "_").replace(">", "_").replace(", ", "_")
-    cmd = [
-      'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
-      line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
-      (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
-    ]
-    jobs_list.append(
-      jobset.JobSpec(
-        cmd,
-        shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
-                         loops),
-        verbose_success=True,
-        cpu_cost=2,
-        timeout_seconds=60 * 60)) # one hour
-  return jobs_list
+    jobs_list = []
+    for line in subprocess.check_output([
+            'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
+            '--benchmark_filter=%s' % regex
+    ]).splitlines():
+        stripped_line = line.strip().replace("/", "_").replace(
+            "<", "_").replace(">", "_").replace(", ", "_")
+        cmd = [
+            'bm_diff_%s/%s/%s' % (name, cfg, bm),
+            '--benchmark_filter=^%s$' % line,
+            '--benchmark_out=%s.%s.%s.%s.%d.json' %
+            (bm, stripped_line, cfg, name, idx),
+            '--benchmark_out_format=json',
+        ]
+        jobs_list.append(
+            jobset.JobSpec(
+                cmd,
+                shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
+                                                 loops),
+                verbose_success=True,
+                cpu_cost=2,
+                timeout_seconds=60 * 60))  # one hour
+    return jobs_list
 
 
 def create_jobs(name, benchmarks, loops, regex, counters):
-  jobs_list = []
-  for loop in range(0, loops):
-    for bm in benchmarks:
-      jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
-      if counters:
-        jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
-                        loops)
-  random.shuffle(jobs_list, random.SystemRandom().random)
-  return jobs_list
+    jobs_list = []
+    for loop in range(0, loops):
+        for bm in benchmarks:
+            jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
+            if counters:
+                jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
+                                              loops)
+    random.shuffle(jobs_list, random.SystemRandom().random)
+    return jobs_list
 
 
 if __name__ == '__main__':
-  args = _args()
-  jobs_list = create_jobs(args.name, args.benchmarks, args.loops, 
-                          args.regex, args.counters)
-  jobset.run(jobs_list, maxjobs=args.jobs)
+    args = _args()
+    jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
+                            args.counters)
+    jobset.run(jobs_list, maxjobs=args.jobs)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 63e691a..2a77040 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -19,40 +19,41 @@
 
 _DEFAULT_THRESHOLD = 1e-10
 
+
 def scale(a, mul):
-  return [x * mul for x in a]
+    return [x * mul for x in a]
 
 
 def cmp(a, b):
-  return stats.ttest_ind(a, b)
+    return stats.ttest_ind(a, b)
 
 
-def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
-  if (len(set(new))) == 1 and new == old: return 0
-  s0, p0 = cmp(new, old)
-  if math.isnan(p0): return 0
-  if s0 == 0: return 0
-  if p0 > threshold: return 0
-  if s0 < 0:
-    pct = 1
-    while pct < 100:
-      sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
-      if sp > 0: break
-      if pp > threshold: break
-      pct += 1
-    return -(pct - 1)
-  else:
-    pct = 1
-    while pct < 10000:
-      sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
-      if sp < 0: break
-      if pp > threshold: break
-      pct += 1
-    return pct - 1
+def speedup(new, old, threshold=_DEFAULT_THRESHOLD):
+    if (len(set(new))) == 1 and new == old: return 0
+    s0, p0 = cmp(new, old)
+    if math.isnan(p0): return 0
+    if s0 == 0: return 0
+    if p0 > threshold: return 0
+    if s0 < 0:
+        pct = 1
+        while pct < 100:
+            sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
+            if sp > 0: break
+            if pp > threshold: break
+            pct += 1
+        return -(pct - 1)
+    else:
+        pct = 1
+        while pct < 10000:
+            sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
+            if sp < 0: break
+            if pp > threshold: break
+            pct += 1
+        return pct - 1
 
 
 if __name__ == "__main__":
-  new = [0.0, 0.0, 0.0, 0.0] 
-  old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
-  print speedup(new, old, 1e-5)
-  print speedup(old, new, 1e-5)
+    new = [0.0, 0.0, 0.0, 0.0]
+    old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+    print speedup(new, old, 1e-5)
+    print speedup(old, new, 1e-5)
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index eb450ee..1dd9f65 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -15,187 +15,196 @@
 import os
 
 _BM_SPECS = {
-  'BM_UnaryPingPong': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'response_size'],
-  },
-  'BM_PumpStreamClientToServer': {
-    'tpl': ['fixture'],
-    'dyn': ['request_size'],
-  },
-  'BM_PumpStreamServerToClient': {
-    'tpl': ['fixture'],
-    'dyn': ['request_size'],
-  },
-  'BM_StreamingPingPong': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'request_count'],
-  },
-  'BM_StreamingPingPongMsgs': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size'],
-  },
-  'BM_PumpStreamServerToClient_Trickle': {
-    'tpl': [],
-    'dyn': ['request_size', 'bandwidth_kilobits'],
-  },
-  'BM_PumpUnbalancedUnary_Trickle': {
-    'tpl': [],
-    'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
-  },
-  'BM_ErrorStringOnNewError': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorStringRepeatedly': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorGetStatus': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorGetStatusCode': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorHttpError': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_HasClearGrpcStatus': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_IsolatedFilter': {
-    'tpl': ['fixture', 'client_mutator'],
-    'dyn': [],
-  },
-  'BM_HpackEncoderEncodeHeader': {
-    'tpl': ['fixture'],
-    'dyn': ['end_of_stream', 'request_size'],
-  },
-  'BM_HpackParserParseHeader': {
-    'tpl': ['fixture', 'on_header'],
-    'dyn': [],
-  },
-  'BM_CallCreateDestroy': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_Zalloc': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_PollEmptyPollset_SpeedOfLight': {
-    'tpl': [],
-    'dyn': ['request_size', 'request_count'],
-  },
-  'BM_StreamCreateSendInitialMetadataDestroy': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_TransportStreamSend': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_TransportStreamRecv': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_StreamingPingPongWithCoalescingApi': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'request_count', 'end_of_stream'],
-  },
-  'BM_Base16SomeStuff': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  }
+    'BM_UnaryPingPong': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'response_size'],
+    },
+    'BM_PumpStreamClientToServer': {
+        'tpl': ['fixture'],
+        'dyn': ['request_size'],
+    },
+    'BM_PumpStreamServerToClient': {
+        'tpl': ['fixture'],
+        'dyn': ['request_size'],
+    },
+    'BM_StreamingPingPong': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'request_count'],
+    },
+    'BM_StreamingPingPongMsgs': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size'],
+    },
+    'BM_PumpStreamServerToClient_Trickle': {
+        'tpl': [],
+        'dyn': ['request_size', 'bandwidth_kilobits'],
+    },
+    'BM_PumpUnbalancedUnary_Trickle': {
+        'tpl': [],
+        'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
+    },
+    'BM_ErrorStringOnNewError': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorStringRepeatedly': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorGetStatus': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorGetStatusCode': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorHttpError': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_HasClearGrpcStatus': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_IsolatedFilter': {
+        'tpl': ['fixture', 'client_mutator'],
+        'dyn': [],
+    },
+    'BM_HpackEncoderEncodeHeader': {
+        'tpl': ['fixture'],
+        'dyn': ['end_of_stream', 'request_size'],
+    },
+    'BM_HpackParserParseHeader': {
+        'tpl': ['fixture', 'on_header'],
+        'dyn': [],
+    },
+    'BM_CallCreateDestroy': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_Zalloc': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_PollEmptyPollset_SpeedOfLight': {
+        'tpl': [],
+        'dyn': ['request_size', 'request_count'],
+    },
+    'BM_StreamCreateSendInitialMetadataDestroy': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_TransportStreamSend': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_TransportStreamRecv': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_StreamingPingPongWithCoalescingApi': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'request_count', 'end_of_stream'],
+    },
+    'BM_Base16SomeStuff': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    }
 }
 
+
 def numericalize(s):
-  if not s: return ''
-  if s[-1] == 'k':
-    return float(s[:-1]) * 1024
-  if s[-1] == 'M':
-    return float(s[:-1]) * 1024 * 1024
-  if 0 <= (ord(s[-1]) - ord('0')) <= 9:
-    return float(s)
-  assert 'not a number: %s' % s
+    if not s: return ''
+    if s[-1] == 'k':
+        return float(s[:-1]) * 1024
+    if s[-1] == 'M':
+        return float(s[:-1]) * 1024 * 1024
+    if 0 <= (ord(s[-1]) - ord('0')) <= 9:
+        return float(s)
+    assert False, 'not a number: %s' % s
+
 
 def parse_name(name):
-  cpp_name = name
-  if '<' not in name and '/' not in name and name not in _BM_SPECS:
-    return {'name': name, 'cpp_name': name}
-  rest = name
-  out = {}
-  tpl_args = []
-  dyn_args = []
-  if '<' in rest:
-    tpl_bit = rest[rest.find('<') + 1 : rest.rfind('>')]
-    arg = ''
-    nesting = 0
-    for c in tpl_bit:
-      if c == '<':
-        nesting += 1
-        arg += c
-      elif c == '>':
-        nesting -= 1
-        arg += c
-      elif c == ',':
-        if nesting == 0:
-          tpl_args.append(arg.strip())
-          arg = ''
-        else:
-          arg += c
-      else:
-        arg += c
-    tpl_args.append(arg.strip())
-    rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
-  if '/' in rest:
-    s = rest.split('/')
-    rest = s[0]
-    dyn_args = s[1:]
-  name = rest
-  print (name)
-  print (dyn_args, _BM_SPECS[name]['dyn'])
-  print (tpl_args, _BM_SPECS[name]['tpl'])
-  assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
-  assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
-  assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
-  out['name'] = name
-  out['cpp_name'] = cpp_name
-  out.update(dict((k, numericalize(v)) for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
-  out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
-  return out
+    cpp_name = name
+    if '<' not in name and '/' not in name and name not in _BM_SPECS:
+        return {'name': name, 'cpp_name': name}
+    rest = name
+    out = {}
+    tpl_args = []
+    dyn_args = []
+    if '<' in rest:
+        tpl_bit = rest[rest.find('<') + 1:rest.rfind('>')]
+        arg = ''
+        nesting = 0
+        for c in tpl_bit:
+            if c == '<':
+                nesting += 1
+                arg += c
+            elif c == '>':
+                nesting -= 1
+                arg += c
+            elif c == ',':
+                if nesting == 0:
+                    tpl_args.append(arg.strip())
+                    arg = ''
+                else:
+                    arg += c
+            else:
+                arg += c
+        tpl_args.append(arg.strip())
+        rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
+    if '/' in rest:
+        s = rest.split('/')
+        rest = s[0]
+        dyn_args = s[1:]
+    name = rest
+    print(name)
+    print(dyn_args, _BM_SPECS[name]['dyn'])
+    print(tpl_args, _BM_SPECS[name]['tpl'])
+    assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
+    assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
+    assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
+    out['name'] = name
+    out['cpp_name'] = cpp_name
+    out.update(
+        dict((k, numericalize(v))
+             for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
+    out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
+    return out
 
-def expand_json(js, js2 = None):
-  if not js and not js2: raise StopIteration()
-  if not js: js = js2
-  for bm in js['benchmarks']:
-    if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
-    context = js['context']
-    if 'label' in bm:
-      labels_list = [s.split(':') for s in bm['label'].strip().split(' ') if len(s) and s[0] != '#']
-      for el in labels_list:
-        el[0] = el[0].replace('/iter', '_per_iteration')
-      labels = dict(labels_list)
-    else:
-      labels = {}
-    row = {
-      'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
-      'jenkins_job': os.environ.get('JOB_NAME', ''),
-    }
-    row.update(context)
-    row.update(bm)
-    row.update(parse_name(row['name']))
-    row.update(labels)
-    if js2:
-      for bm2 in js2['benchmarks']:
-        if bm['name'] == bm2['name'] and 'already_used' not in bm2:
-          row['cpu_time'] = bm2['cpu_time']
-          row['real_time'] = bm2['real_time']
-          row['iterations'] = bm2['iterations']
-          bm2['already_used'] = True
-          break
-    yield row
+
+def expand_json(js, js2=None):
+    if not js and not js2: raise StopIteration()
+    if not js: js = js2
+    for bm in js['benchmarks']:
+        if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'):
+            continue
+        context = js['context']
+        if 'label' in bm:
+            labels_list = [
+                s.split(':') for s in bm['label'].strip().split(' ')
+                if len(s) and s[0] != '#'
+            ]
+            for el in labels_list:
+                el[0] = el[0].replace('/iter', '_per_iteration')
+            labels = dict(labels_list)
+        else:
+            labels = {}
+        row = {
+            'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
+            'jenkins_job': os.environ.get('JOB_NAME', ''),
+        }
+        row.update(context)
+        row.update(bm)
+        row.update(parse_name(row['name']))
+        row.update(labels)
+        if js2:
+            for bm2 in js2['benchmarks']:
+                if bm['name'] == bm2['name'] and 'already_used' not in bm2:
+                    row['cpu_time'] = bm2['cpu_time']
+                    row['real_time'] = bm2['real_time']
+                    row['iterations'] = bm2['iterations']
+                    bm2['already_used'] = True
+                    break
+        yield row
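For reference, the reformatted parse_name above splits a benchmark name into its base name, the template arguments between `<...>` (respecting nesting), and the `/`-separated dynamic arguments, then maps them onto the field names listed in _BM_SPECS; expand_json then merges those fields into each result row. The following is a minimal standalone sketch of that decomposition; split_name, _SPECS and the sample benchmark name are illustrative stand-ins, not part of bm_json.py:

```python
# Minimal sketch of the decomposition performed by parse_name(); the spec
# table, the helper name and the sample benchmark name are illustrative
# stand-ins rather than the real _BM_SPECS contents.
_SPECS = {
    'BM_UnaryPingPong': {
        'tpl': ['fixture', 'client_mutator'],
        'dyn': ['request_size', 'response_size'],
    }
}


def split_name(name):
    rest, tpl_args, dyn_args = name, [], []
    if '<' in rest:
        inner = rest[rest.find('<') + 1:rest.rfind('>')]
        arg, nesting = '', 0
        for c in inner:  # split on top-level commas only; nested <...> stays intact
            if c == '<':
                nesting += 1
            elif c == '>':
                nesting -= 1
            if c == ',' and nesting == 0:
                tpl_args.append(arg.strip())
                arg = ''
            else:
                arg += c
        tpl_args.append(arg.strip())
        rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
    if '/' in rest:
        parts = rest.split('/')
        rest, dyn_args = parts[0], parts[1:]
    out = {'name': rest}
    out.update(zip(_SPECS[rest]['tpl'], tpl_args))
    out.update(zip(_SPECS[rest]['dyn'], dyn_args))
    return out


print(split_name('BM_UnaryPingPong<InProcess, NoOpMutator>/0/32768'))
# -> {'name': 'BM_UnaryPingPong', 'fixture': 'InProcess',
#     'client_mutator': 'NoOpMutator', 'request_size': '0',
#     'response_size': '32768'}   (key order may vary by Python version)
```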
diff --git a/tools/profiling/qps/qps_diff.py b/tools/profiling/qps/qps_diff.py
index 0654f45..55a81f0 100755
--- a/tools/profiling/qps/qps_diff.py
+++ b/tools/profiling/qps/qps_diff.py
@@ -26,144 +26,146 @@
 import tabulate
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', 'microbenchmarks', 'bm_diff'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'microbenchmarks', 'bm_diff'))
 import bm_speedup
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on QPS Driver')
-  argp.add_argument(
-    '-d',
-    '--diff_base',
-    type=str,
-    help='Commit or branch to compare the current one to')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=4,
-    help='Number of loops for each benchmark. More loops cuts down on noise'
-  )
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  args = argp.parse_args()
-  assert args.diff_base, "diff_base must be set"
-  return args
+    argp = argparse.ArgumentParser(description='Perform diff on QPS Driver')
+    argp.add_argument(
+        '-d',
+        '--diff_base',
+        type=str,
+        help='Commit or branch to compare the current one to')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=4,
+        help='Number of loops for each benchmark. More loops cuts down on noise')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    args = argp.parse_args()
+    assert args.diff_base, "diff_base must be set"
+    return args
 
 
 def _make_cmd(jobs):
-  return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
+    return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
 
 
 def build(name, jobs):
-  shutil.rmtree('qps_diff_%s' % name, ignore_errors=True)
-  subprocess.check_call(['git', 'submodule', 'update'])
-  try:
-    subprocess.check_call(_make_cmd(jobs))
-  except subprocess.CalledProcessError, e:
-    subprocess.check_call(['make', 'clean'])
-    subprocess.check_call(_make_cmd(jobs))
-  os.rename('bins', 'qps_diff_%s' % name)
+    shutil.rmtree('qps_diff_%s' % name, ignore_errors=True)
+    subprocess.check_call(['git', 'submodule', 'update'])
+    try:
+        subprocess.check_call(_make_cmd(jobs))
+    except subprocess.CalledProcessError, e:
+        subprocess.check_call(['make', 'clean'])
+        subprocess.check_call(_make_cmd(jobs))
+    os.rename('bins', 'qps_diff_%s' % name)
 
 
 def _run_cmd(name, scenario, fname):
-  return ['qps_diff_%s/opt/qps_json_driver' % name, '--scenarios_json', scenario, '--json_file_out', fname]
+    return [
+        'qps_diff_%s/opt/qps_json_driver' % name, '--scenarios_json', scenario,
+        '--json_file_out', fname
+    ]
 
 
 def run(name, scenarios, loops):
-  for sn in scenarios:
-    for i in range(0, loops):
-      fname = "%s.%s.%d.json" % (sn, name, i)
-      subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
+    for sn in scenarios:
+        for i in range(0, loops):
+            fname = "%s.%s.%d.json" % (sn, name, i)
+            subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
 
 
 def _load_qps(fname):
-  try:
-    with open(fname) as f:
-      return json.loads(f.read())['qps']
-  except IOError, e:
-    print("IOError occurred reading file: %s" % fname)
-    return None
-  except ValueError, e:
-    print("ValueError occurred reading file: %s" % fname)
-    return None
+    try:
+        with open(fname) as f:
+            return json.loads(f.read())['qps']
+    except IOError, e:
+        print("IOError occurred reading file: %s" % fname)
+        return None
+    except ValueError, e:
+        print("ValueError occurred reading file: %s" % fname)
+        return None
 
 
 def _median(ary):
-  assert (len(ary))
-  ary = sorted(ary)
-  n = len(ary)
-  if n % 2 == 0:
-    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
-  else:
-    return ary[n / 2]
+    assert (len(ary))
+    ary = sorted(ary)
+    n = len(ary)
+    if n % 2 == 0:
+        return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+    else:
+        return ary[n / 2]
 
 
 def diff(scenarios, loops, old, new):
-  old_data = {}
-  new_data = {}
+    old_data = {}
+    new_data = {}
 
-  # collect data
-  for sn in scenarios:
-    old_data[sn] = []
-    new_data[sn] = []
-    for i in range(loops):
-      old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
-      new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
+    # collect data
+    for sn in scenarios:
+        old_data[sn] = []
+        new_data[sn] = []
+        for i in range(loops):
+            old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
+            new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
 
-  # crunch data
-  headers = ['Benchmark', 'qps']
-  rows = []
-  for sn in scenarios:
-    mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
-    print('%s: %s=%r %s=%r mdn_diff=%r' % (sn, new, new_data[sn], old, old_data[sn], mdn_diff))
-    s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
-    if abs(s) > 3 and mdn_diff > 0.5:
-      rows.append([sn, '%+d%%' % s])
+    # crunch data
+    headers = ['Benchmark', 'qps']
+    rows = []
+    for sn in scenarios:
+        mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
+        print('%s: %s=%r %s=%r mdn_diff=%r' %
+              (sn, new, new_data[sn], old, old_data[sn], mdn_diff))
+        s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
+        if abs(s) > 3 and mdn_diff > 0.5:
+            rows.append([sn, '%+d%%' % s])
 
-  if rows:
-    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-  else:
-    return None
+    if rows:
+        return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+    else:
+        return None
 
 
 def main(args):
-  build('new', args.jobs)
+    build('new', args.jobs)
 
-  if args.diff_base:
-    where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-    subprocess.check_call(['git', 'checkout', args.diff_base])
-    try:
-      build('old', args.jobs)
-    finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+    if args.diff_base:
+        where_am_i = subprocess.check_output(
+            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        subprocess.check_call(['git', 'checkout', args.diff_base])
+        try:
+            build('old', args.jobs)
+        finally:
+            subprocess.check_call(['git', 'checkout', where_am_i])
+            subprocess.check_call(['git', 'submodule', 'update'])
 
-  run('new', qps_scenarios._SCENARIOS, args.loops)
-  run('old', qps_scenarios._SCENARIOS, args.loops)
+    run('new', qps_scenarios._SCENARIOS, args.loops)
+    run('old', qps_scenarios._SCENARIOS, args.loops)
 
-  diff_output = diff(qps_scenarios._SCENARIOS, args.loops, 'old', 'new')
+    diff_output = diff(qps_scenarios._SCENARIOS, args.loops, 'old', 'new')
 
-  if diff_output:
-    text = '[qps] Performance differences noted:\n%s' % diff_output
-  else:
-    text = '[qps] No significant performance differences'
-  print('%s' % text)
-  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+    if diff_output:
+        text = '[qps] Performance differences noted:\n%s' % diff_output
+    else:
+        text = '[qps] No significant performance differences'
+    print('%s' % text)
+    comment_on_pr.comment_on_pr('```\n%s\n```' % text)
 
 
 if __name__ == '__main__':
-  args = _args()
-  main(args)
+    args = _args()
+    main(args)
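A note on the comparison logic kept intact by the reformat above: diff() reports a scenario only when the median QPS across loops moves by more than 0.5 and bm_speedup.speedup() returns a change whose magnitude exceeds 3 (percent). Below is a small worked example of the median gate; the QPS samples are invented, and the sketch uses `//` so it also runs under Python 3 (the script itself targets Python 2):

```python
# Worked example of the median gate in diff(); the sample QPS numbers are
# invented, and median() uses // so the sketch is version-agnostic.
def median(ary):
    ary = sorted(ary)
    n = len(ary)
    if n % 2 == 0:
        return (ary[(n - 1) // 2] + ary[(n - 1) // 2 + 1]) / 2.0
    return ary[n // 2]


old = [101000.0, 99000.0, 100500.0, 100000.0]   # 4 loops of the old binary
new = [108000.0, 107000.0, 109000.0, 107500.0]  # 4 loops of the new binary

mdn_diff = abs(median(new) - median(old))
print(mdn_diff)  # 7500.0 > 0.5, so this is not dismissed as noise
# A row is emitted only if, in addition, bm_speedup.speedup(new, old, 10e-5)
# reports a change larger than 3 (percent) in magnitude.
```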
diff --git a/tools/profiling/qps/qps_scenarios.py b/tools/profiling/qps/qps_scenarios.py
index 4fbbdef..532acc9 100644
--- a/tools/profiling/qps/qps_scenarios.py
+++ b/tools/profiling/qps/qps_scenarios.py
@@ -14,6 +14,8 @@
 """ QPS Scenarios to run """
 
 _SCENARIOS = {
-  'large-message-throughput': '{"scenarios":[{"name":"large-message-throughput", "spawn_local_worker_count": -2, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 1, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 1048576, "req_size": 1048576}}, "client_channels": 1, "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}',
-  'multi-channel-64-KiB': '{"scenarios":[{"name":"multi-channel-64-KiB", "spawn_local_worker_count": -3, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 31, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 65536, "req_size": 65536}}, "client_channels": 32, "async_client_threads": 31, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
+    'large-message-throughput':
+    '{"scenarios":[{"name":"large-message-throughput", "spawn_local_worker_count": -2, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 1, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 1048576, "req_size": 1048576}}, "client_channels": 1, "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}',
+    'multi-channel-64-KiB':
+    '{"scenarios":[{"name":"multi-channel-64-KiB", "spawn_local_worker_count": -3, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 31, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 65536, "req_size": 65536}}, "client_channels": 32, "async_client_threads": 31, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
 }
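Each entry in _SCENARIOS is a JSON string handed to qps_json_driver via --scenarios_json. As a quick sanity check (a sketch, not a file in the repo; it assumes it is run from the same directory as qps_scenarios.py), the strings can be parsed and a few fields inspected:

```python
import json

# Assumption: this snippet sits next to qps_scenarios.py so the import resolves.
from qps_scenarios import _SCENARIOS

for name, raw in _SCENARIOS.items():
    scenario = json.loads(raw)['scenarios'][0]
    print('%s benchmark_seconds=%d req_size=%d' %
          (name, scenario['benchmark_seconds'],
           scenario['client_config']['payload_config']['simple_params']['req_size']))
# large-message-throughput benchmark_seconds=270 req_size=1048576
# multi-channel-64-KiB benchmark_seconds=270 req_size=65536
```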
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index ea202ed..dc0803b 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets to build artifacts."""
 
 import os.path
@@ -24,316 +23,350 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
-                   docker_base_image=None, extra_docker_args=None):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
-  environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0,
+                          timeout_seconds=30 * 60,
+                          docker_base_image=None,
+                          extra_docker_args=None):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
+    environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
-                'OUTPUT_DIR': 'artifacts'}
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+        'OUTPUT_DIR': 'artifacts'
+    }
 
-  if docker_base_image is not None:
-    docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
-  if extra_docker_args is not None:
-    docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='build_artifact.%s' % (name),
-          timeout_seconds=timeout_seconds,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    if docker_base_image is not None:
+        docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
+    if extra_docker_args is not None:
+        docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='build_artifact.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
 
-def create_jobspec(name, cmdline, environ={}, shell=False,
-                   flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
+def create_jobspec(name,
+                   cmdline,
+                   environ={},
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0,
+                   timeout_seconds=30 * 60,
                    use_workspace=False,
                    cpu_cost=1.0):
-  """Creates jobspec."""
-  environ = environ.copy()
-  if use_workspace:
-    environ['WORKSPACE_NAME'] = 'workspace_%s' % name
-    environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
-    cmdline = ['bash',
-               'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
-  else:
-    environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
+    """Creates jobspec."""
+    environ = environ.copy()
+    if use_workspace:
+        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+        environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
+        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+                  ] + cmdline
+    else:
+        environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
 
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          shortname='build_artifact.%s' % (name),
-          timeout_seconds=timeout_seconds,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell,
-          cpu_cost=cpu_cost)
-  return jobspec
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        shortname='build_artifact.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell,
+        cpu_cost=cpu_cost)
+    return jobspec
 
 
 _MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
 
-_ARCH_FLAG_MAP = {
-  'x86': '-m32',
-  'x64': '-m64'
-}
+_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
 
 
 class PythonArtifact:
-  """Builds Python artifacts."""
+    """Builds Python artifacts."""
 
-  def __init__(self, platform, arch, py_version):
-    self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'python', platform, arch, py_version]
-    self.py_version = py_version
+    def __init__(self, platform, arch, py_version):
+        self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'python', platform, arch, py_version]
+        self.py_version = py_version
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    environ = {}
-    if self.platform == 'linux_extra':
-      # Raspberry Pi build
-      environ['PYTHON'] = '/usr/local/bin/python{}'.format(self.py_version)
-      environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
-      # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
-      # A QEMU bug causes submodule update to hang, so we copy directly
-      environ['RELATIVE_COPY_PATH'] = '.'
-      extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
-          'tools/run_tests/artifacts/build_artifact_python.sh',
-          environ=environ,
-          timeout_seconds=60*60*5,
-          docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
-          extra_docker_args=extra_args)
-    elif self.platform == 'linux':
-      if self.arch == 'x86':
-        environ['SETARCH_CMD'] = 'linux32'
-      # Inside the manylinux container, the python installations are located in
-      # special places...
-      environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.py_version)
-      environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
-      # Platform autodetection for the manylinux1 image breaks so we set the
-      # defines ourselves.
-      # TODO(atash) get better platform-detection support in core so we don't
-      # need to do this manually...
-      environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
-      environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
-      environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch,
-          'tools/run_tests/artifacts/build_artifact_python.sh',
-          environ=environ,
-          timeout_seconds=60*60,
-          docker_base_image='quay.io/pypa/manylinux1_i686' if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
-    elif self.platform == 'windows':
-      if 'Python27' in self.py_version or 'Python34' in self.py_version:
-        environ['EXT_COMPILER'] = 'mingw32'
-      else:
-        environ['EXT_COMPILER'] = 'msvc'
-      # For some reason, the batch script %random% always runs with the same
-      # seed.  We create a random temp-dir here
-      dir = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_python.bat',
-                             self.py_version,
-                             '32' if self.arch == 'x86' else '64'],
-                            environ=environ,
-                            timeout_seconds=45*60,
-                            use_workspace=True)
-    else:
-      environ['PYTHON'] = self.py_version
-      environ['SKIP_PIP_INSTALL'] = 'TRUE'
-      return create_jobspec(self.name,
-                            ['tools/run_tests/artifacts/build_artifact_python.sh'],
-                            environ=environ,
-                            timeout_seconds=60*60,
-                            use_workspace=True)
+    def build_jobspec(self):
+        environ = {}
+        if self.platform == 'linux_extra':
+            # Raspberry Pi build
+            environ['PYTHON'] = '/usr/local/bin/python{}'.format(
+                self.py_version)
+            environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
+            # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
+            # A QEMU bug causes submodule update to hang, so we copy directly
+            environ['RELATIVE_COPY_PATH'] = '.'
+            extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+                'tools/run_tests/artifacts/build_artifact_python.sh',
+                environ=environ,
+                timeout_seconds=60 * 60 * 5,
+                docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
+                extra_docker_args=extra_args)
+        elif self.platform == 'linux':
+            if self.arch == 'x86':
+                environ['SETARCH_CMD'] = 'linux32'
+            # Inside the manylinux container, the python installations are located in
+            # special places...
+            environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
+                self.py_version)
+            environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
+            # Platform autodetection for the manylinux1 image breaks so we set the
+            # defines ourselves.
+            # TODO(atash) get better platform-detection support in core so we don't
+            # need to do this manually...
+            environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
+            environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
+            environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
+                self.arch,
+                'tools/run_tests/artifacts/build_artifact_python.sh',
+                environ=environ,
+                timeout_seconds=60 * 60,
+                docker_base_image='quay.io/pypa/manylinux1_i686'
+                if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
+        elif self.platform == 'windows':
+            if 'Python27' in self.py_version or 'Python34' in self.py_version:
+                environ['EXT_COMPILER'] = 'mingw32'
+            else:
+                environ['EXT_COMPILER'] = 'msvc'
+            # For some reason, the batch script %random% always runs with the same
+            # seed.  We create a random temp-dir here
+            dir = ''.join(
+                random.choice(string.ascii_uppercase) for _ in range(10))
+            return create_jobspec(
+                self.name, [
+                    'tools\\run_tests\\artifacts\\build_artifact_python.bat',
+                    self.py_version, '32' if self.arch == 'x86' else '64'
+                ],
+                environ=environ,
+                timeout_seconds=45 * 60,
+                use_workspace=True)
+        else:
+            environ['PYTHON'] = self.py_version
+            environ['SKIP_PIP_INSTALL'] = 'TRUE'
+            return create_jobspec(
+                self.name,
+                ['tools/run_tests/artifacts/build_artifact_python.sh'],
+                environ=environ,
+                timeout_seconds=60 * 60,
+                use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class RubyArtifact:
-  """Builds ruby native gem."""
+    """Builds ruby native gem."""
 
-  def __init__(self, platform, arch):
-    self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'ruby', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'ruby', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    # Ruby build uses docker internally and docker cannot be nested.
-    # We are using a custom workspace instead.
-    return create_jobspec(self.name,
-                          ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
-                          use_workspace=True,
-                          timeout_seconds=45*60)
+    def build_jobspec(self):
+        # Ruby build uses docker internally and docker cannot be nested.
+        # We are using a custom workspace instead.
+        return create_jobspec(
+            self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
+            use_workspace=True,
+            timeout_seconds=45 * 60)
 
 
 class CSharpExtArtifact:
-  """Builds C# native extension library"""
+    """Builds C# native extension library"""
 
-  def __init__(self, platform, arch):
-    self.name = 'csharp_ext_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'csharp', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'csharp_ext_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'csharp', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'windows':
-      cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
-                             cmake_arch_option],
-                            use_workspace=True)
-    else:
-      environ = {'CONFIG': 'opt',
-                 'EMBED_OPENSSL': 'true',
-                 'EMBED_ZLIB': 'true',
-                 'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
-                 'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
-                 'LDFLAGS': ''}
-      if self.platform == 'linux':
-        return create_docker_jobspec(self.name,
-            'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
-            'tools/run_tests/artifacts/build_artifact_csharp.sh',
-            environ=environ)
-      else:
-        archflag = _ARCH_FLAG_MAP[self.arch]
-        environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
-        environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
-        environ['LDFLAGS'] += ' %s' % archflag
-        return create_jobspec(self.name,
-                              ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
-                              environ=environ,
-                              use_workspace=True)
+    def build_jobspec(self):
+        if self.platform == 'windows':
+            cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
+            return create_jobspec(
+                self.name, [
+                    'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
+                    cmake_arch_option
+                ],
+                use_workspace=True)
+        else:
+            environ = {
+                'CONFIG': 'opt',
+                'EMBED_OPENSSL': 'true',
+                'EMBED_ZLIB': 'true',
+                'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+                'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+                'LDFLAGS': ''
+            }
+            if self.platform == 'linux':
+                return create_docker_jobspec(
+                    self.name,
+                    'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
+                    'tools/run_tests/artifacts/build_artifact_csharp.sh',
+                    environ=environ)
+            else:
+                archflag = _ARCH_FLAG_MAP[self.arch]
+                environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+                environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+                environ['LDFLAGS'] += ' %s' % archflag
+                return create_jobspec(
+                    self.name,
+                    ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
+                    environ=environ,
+                    use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class PHPArtifact:
-  """Builds PHP PECL package"""
+    """Builds PHP PECL package"""
 
-  def __init__(self, platform, arch):
-    self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'php', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'php', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(
-          self.name,
-          'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
-          'tools/run_tests/artifacts/build_artifact_php.sh')
-    else:
-      return create_jobspec(self.name,
-                            ['tools/run_tests/artifacts/build_artifact_php.sh'],
-                            use_workspace=True)
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+                'tools/run_tests/artifacts/build_artifact_php.sh')
+        else:
+            return create_jobspec(
+                self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
+                use_workspace=True)
+
 
 class ProtocArtifact:
-  """Builds protoc and protoc-plugin artifacts"""
+    """Builds protoc and protoc-plugin artifacts"""
 
-  def __init__(self, platform, arch):
-    self.name = 'protoc_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'protoc', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'protoc_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'protoc', platform, arch]
 
-  def pre_build_jobspecs(self):
-      return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform != 'windows':
-      cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
-      ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
-      if self.platform != 'macos':
-        ldflags += '  -static-libgcc -static-libstdc++ -s'
-      environ={'CONFIG': 'opt',
-               'CXXFLAGS': cxxflags,
-               'LDFLAGS': ldflags,
-               'PROTOBUF_LDFLAGS_EXTRA': ldflags}
-      if self.platform == 'linux':
-        return create_docker_jobspec(self.name,
-            'tools/dockerfile/grpc_artifact_protoc',
-            'tools/run_tests/artifacts/build_artifact_protoc.sh',
-            environ=environ)
-      else:
-        environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
-        return create_jobspec(self.name,
-            ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
-            environ=environ,
-            use_workspace=True)
-    else:
-      generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
-                            environ={'generator': generator},
-                            use_workspace=True)
+    def build_jobspec(self):
+        if self.platform != 'windows':
+            cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
+            ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
+            if self.platform != 'macos':
+                ldflags += '  -static-libgcc -static-libstdc++ -s'
+            environ = {
+                'CONFIG': 'opt',
+                'CXXFLAGS': cxxflags,
+                'LDFLAGS': ldflags,
+                'PROTOBUF_LDFLAGS_EXTRA': ldflags
+            }
+            if self.platform == 'linux':
+                return create_docker_jobspec(
+                    self.name,
+                    'tools/dockerfile/grpc_artifact_protoc',
+                    'tools/run_tests/artifacts/build_artifact_protoc.sh',
+                    environ=environ)
+            else:
+                environ[
+                    'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
+                return create_jobspec(
+                    self.name,
+                    ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
+                    environ=environ,
+                    use_workspace=True)
+        else:
+            generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
+            return create_jobspec(
+                self.name,
+                ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
+                environ={'generator': generator},
+                use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 def targets():
-  """Gets list of supported targets"""
-  return ([Cls(platform, arch)
-           for Cls in (CSharpExtArtifact, ProtocArtifact)
-           for platform in ('linux', 'macos', 'windows')
-           for arch in ('x86', 'x64')] +
-          [PythonArtifact('linux', 'x86', 'cp27-cp27m'),
-           PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
-           PythonArtifact('linux', 'x86', 'cp34-cp34m'),
-           PythonArtifact('linux', 'x86', 'cp35-cp35m'),
-           PythonArtifact('linux', 'x86', 'cp36-cp36m'),
-           PythonArtifact('linux_extra', 'armv7', '2.7'),
-           PythonArtifact('linux_extra', 'armv7', '3.4'),
-           PythonArtifact('linux_extra', 'armv7', '3.5'),
-           PythonArtifact('linux_extra', 'armv7', '3.6'),
-           PythonArtifact('linux_extra', 'armv6', '2.7'),
-           PythonArtifact('linux_extra', 'armv6', '3.4'),
-           PythonArtifact('linux_extra', 'armv6', '3.5'),
-           PythonArtifact('linux_extra', 'armv6', '3.6'),
-           PythonArtifact('linux', 'x64', 'cp27-cp27m'),
-           PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
-           PythonArtifact('linux', 'x64', 'cp34-cp34m'),
-           PythonArtifact('linux', 'x64', 'cp35-cp35m'),
-           PythonArtifact('linux', 'x64', 'cp36-cp36m'),
-           PythonArtifact('macos', 'x64', 'python2.7'),
-           PythonArtifact('macos', 'x64', 'python3.4'),
-           PythonArtifact('macos', 'x64', 'python3.5'),
-           PythonArtifact('macos', 'x64', 'python3.6'),
-           PythonArtifact('windows', 'x86', 'Python27_32bits'),
-           PythonArtifact('windows', 'x86', 'Python34_32bits'),
-           PythonArtifact('windows', 'x86', 'Python35_32bits'),
-           PythonArtifact('windows', 'x86', 'Python36_32bits'),
-           PythonArtifact('windows', 'x64', 'Python27'),
-           PythonArtifact('windows', 'x64', 'Python34'),
-           PythonArtifact('windows', 'x64', 'Python35'),
-           PythonArtifact('windows', 'x64', 'Python36'),
-           RubyArtifact('linux', 'x64'),
-           RubyArtifact('macos', 'x64'),
-           PHPArtifact('linux', 'x64'),
-           PHPArtifact('macos', 'x64')])
+    """Gets list of supported targets"""
+    return ([
+        Cls(platform, arch)
+        for Cls in (CSharpExtArtifact, ProtocArtifact)
+        for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
+    ] + [
+        PythonArtifact('linux', 'x86', 'cp27-cp27m'), PythonArtifact(
+            'linux', 'x86', 'cp27-cp27mu'), PythonArtifact(
+                'linux', 'x86', 'cp34-cp34m'), PythonArtifact(
+                    'linux', 'x86', 'cp35-cp35m'), PythonArtifact(
+                        'linux', 'x86', 'cp36-cp36m'), PythonArtifact(
+                            'linux_extra', 'armv7', '2.7'), PythonArtifact(
+                                'linux_extra', 'armv7', '3.4'), PythonArtifact(
+                                    'linux_extra', 'armv7', '3.5'),
+        PythonArtifact('linux_extra', 'armv7', '3.6'), PythonArtifact(
+            'linux_extra', 'armv6', '2.7'), PythonArtifact(
+                'linux_extra', 'armv6', '3.4'), PythonArtifact(
+                    'linux_extra', 'armv6', '3.5'), PythonArtifact(
+                        'linux_extra', 'armv6', '3.6'), PythonArtifact(
+                            'linux', 'x64', 'cp27-cp27m'), PythonArtifact(
+                                'linux', 'x64', 'cp27-cp27mu'), PythonArtifact(
+                                    'linux', 'x64', 'cp34-cp34m'),
+        PythonArtifact('linux', 'x64', 'cp35-cp35m'), PythonArtifact(
+            'linux', 'x64', 'cp36-cp36m'), PythonArtifact(
+                'macos', 'x64', 'python2.7'), PythonArtifact(
+                    'macos', 'x64', 'python3.4'), PythonArtifact('macos', 'x64',
+                                                                 'python3.5'),
+        PythonArtifact('macos', 'x64', 'python3.6'), PythonArtifact(
+            'windows', 'x86', 'Python27_32bits'), PythonArtifact(
+                'windows', 'x86', 'Python34_32bits'), PythonArtifact(
+                    'windows', 'x86', 'Python35_32bits'), PythonArtifact(
+                        'windows', 'x86', 'Python36_32bits'), PythonArtifact(
+                            'windows', 'x64', 'Python27'),
+        PythonArtifact('windows', 'x64', 'Python34'), PythonArtifact(
+            'windows', 'x64', 'Python35'), PythonArtifact(
+                'windows', 'x64', 'Python36'), RubyArtifact(
+                    'linux', 'x64'), RubyArtifact('macos', 'x64'), PHPArtifact(
+                        'linux', 'x64'), PHPArtifact('macos', 'x64')
+    ])
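targets() above returns one artifact object per language/platform/arch combination, each exposing labels, pre_build_jobspecs() and build_jobspec(). The sketch below shows how a caller might select a subset by label and collect job specs; the filter is purely illustrative (the real run_tests driver applies its own selection), and it assumes tools/run_tests is on sys.path so that artifact_targets and its python_utils.jobset dependency import cleanly:

```python
import artifact_targets

# Illustrative selection: only the manylinux Python artifacts.
selected = [
    t for t in artifact_targets.targets()
    if 'python' in t.labels and 'linux' in t.labels
]

jobs = []
for target in selected:
    jobs.extend(target.pre_build_jobspecs())  # empty for these artifact targets
    jobs.append(target.build_jobspec())       # a jobset.JobSpec ready to run

print([str(t) for t in selected])
```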
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index 7ba0e0e..83f656b 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets run distribution package tests."""
 
 import os.path
@@ -22,280 +21,306 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0,
-                   copy_rel_path=None):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
-  # the entire repo will be cloned if copy_rel_path is not set.
-  if copy_rel_path:
-    environ['RELATIVE_COPY_PATH'] = copy_rel_path
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0,
+                          copy_rel_path=None):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
+    # the entire repo will be cloned if copy_rel_path is not set.
+    if copy_rel_path:
+        environ['RELATIVE_COPY_PATH'] = copy_rel_path
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='distribtest.%s' % (name),
-          timeout_seconds=30*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
+    }
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='distribtest.%s' % (name),
+        timeout_seconds=30 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
 
-def create_jobspec(name, cmdline, environ=None, shell=False,
-                   flake_retries=0, timeout_retries=0,
+def create_jobspec(name,
+                   cmdline,
+                   environ=None,
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0,
                    use_workspace=False,
-                   timeout_seconds=10*60):
-  """Creates jobspec."""
-  environ = environ.copy()
-  if use_workspace:
-    environ['WORKSPACE_NAME'] = 'workspace_%s' % name
-    cmdline = ['bash',
-               'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          shortname='distribtest.%s' % (name),
-          timeout_seconds=timeout_seconds,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell)
-  return jobspec
+                   timeout_seconds=10 * 60):
+    """Creates jobspec."""
+    environ = environ.copy()
+    if use_workspace:
+        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+                  ] + cmdline
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        shortname='distribtest.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell)
+    return jobspec
 
 
 class CSharpDistribTest(object):
-  """Tests C# NuGet package"""
+    """Tests C# NuGet package"""
 
-  def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
-    self.name = 'csharp_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'csharp', platform, arch]
-    self.script_suffix = ''
-    if docker_suffix:
-      self.name += '_%s' % docker_suffix
-      self.labels.append(docker_suffix)
-    if use_dotnet_cli:
-      self.name += '_dotnetcli'
-      self.script_suffix = '_dotnetcli'
-      self.labels.append('dotnetcli')
-    else:
-      self.labels.append('olddotnet')
+    def __init__(self, platform, arch, docker_suffix=None,
+                 use_dotnet_cli=False):
+        self.name = 'csharp_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'csharp', platform, arch]
+        self.script_suffix = ''
+        if docker_suffix:
+            self.name += '_%s' % docker_suffix
+            self.labels.append(docker_suffix)
+        if use_dotnet_cli:
+            self.name += '_dotnetcli'
+            self.script_suffix = '_dotnetcli'
+            self.labels.append('dotnetcli')
+        else:
+            self.labels.append('olddotnet')
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/csharp_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix,
-          copy_rel_path='test/distrib')
-    elif self.platform == 'macos':
-      return create_jobspec(self.name,
-          ['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
-          environ={'EXTERNAL_GIT_ROOT': '../../../..'},
-          use_workspace=True)
-    elif self.platform == 'windows':
-      if self.arch == 'x64':
-        # Use double leading / as the first occurence gets removed by msys bash
-        # when invoking the .bat file (side-effect of posix path conversion)
-        environ={'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
-                 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
-      else:
-        environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
-      return create_jobspec(self.name,
-          ['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
-          environ=environ,
-          use_workspace=True)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/distribtest/csharp_%s_%s' % (
+                    self.docker_suffix, self.arch),
+                'test/distrib/csharp/run_distrib_test%s.sh' %
+                self.script_suffix,
+                copy_rel_path='test/distrib')
+        elif self.platform == 'macos':
+            return create_jobspec(
+                self.name, [
+                    'test/distrib/csharp/run_distrib_test%s.sh' %
+                    self.script_suffix
+                ],
+                environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+                use_workspace=True)
+        elif self.platform == 'windows':
+            if self.arch == 'x64':
+                # Use double leading / as the first occurrence gets removed by msys bash
+                # when invoking the .bat file (side-effect of posix path conversion)
+                environ = {
+                    'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
+                    'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
+                }
+            else:
+                environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
+            return create_jobspec(
+                self.name, [
+                    'test\\distrib\\csharp\\run_distrib_test%s.bat' %
+                    self.script_suffix
+                ],
+                environ=environ,
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class PythonDistribTest(object):
-  """Tests Python package"""
+    """Tests Python package"""
 
-  def __init__(self, platform, arch, docker_suffix):
-    self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix):
+        self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if not self.platform == 'linux':
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if not self.platform == 'linux':
+            raise Exception("Not supported yet.")
 
-    return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/python_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/python/run_distrib_test.sh',
-          copy_rel_path='test/distrib')
+        return create_docker_jobspec(
+            self.name,
+            'tools/dockerfile/distribtest/python_%s_%s' % (self.docker_suffix,
+                                                           self.arch),
+            'test/distrib/python/run_distrib_test.sh',
+            copy_rel_path='test/distrib')
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class RubyDistribTest(object):
-  """Tests Ruby package"""
+    """Tests Ruby package"""
 
-  def __init__(self, platform, arch, docker_suffix):
-    self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix):
+        self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if not self.platform == 'linux':
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if not self.platform == 'linux':
+            raise Exception("Not supported yet.")
 
-    return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/ruby_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/ruby/run_distrib_test.sh',
-          copy_rel_path='test/distrib')
+        return create_docker_jobspec(
+            self.name,
+            'tools/dockerfile/distribtest/ruby_%s_%s' % (self.docker_suffix,
+                                                         self.arch),
+            'test/distrib/ruby/run_distrib_test.sh',
+            copy_rel_path='test/distrib')
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class PHPDistribTest(object):
-  """Tests PHP package"""
+    """Tests PHP package"""
 
-  def __init__(self, platform, arch, docker_suffix=None):
-    self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix=None):
+        self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-                                   'tools/dockerfile/distribtest/php_%s_%s' % (
-                                       self.docker_suffix,
-                                       self.arch),
-                                   'test/distrib/php/run_distrib_test.sh',
-                                   copy_rel_path='test/distrib')
-    elif self.platform == 'macos':
-      return create_jobspec(self.name,
-          ['test/distrib/php/run_distrib_test.sh'],
-          environ={'EXTERNAL_GIT_ROOT': '../../../..'},
-          use_workspace=True)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/distribtest/php_%s_%s' % (self.docker_suffix,
+                                                            self.arch),
+                'test/distrib/php/run_distrib_test.sh',
+                copy_rel_path='test/distrib')
+        elif self.platform == 'macos':
+            return create_jobspec(
+                self.name, ['test/distrib/php/run_distrib_test.sh'],
+                environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class CppDistribTest(object):
-  """Tests Cpp make intall by building examples."""
+    """Tests Cpp make intall by building examples."""
 
-  def __init__(self, platform, arch, docker_suffix=None, testcase=None):
-    if platform == 'linux':
-      self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix, testcase)
-    else:
-      self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.testcase = testcase
-    self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix, testcase]
+    def __init__(self, platform, arch, docker_suffix=None, testcase=None):
+        if platform == 'linux':
+            self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
+                                             testcase)
+        else:
+            self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.testcase = testcase
+        self.labels = [
+            'distribtest', 'cpp', platform, arch, docker_suffix, testcase
+        ]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-                                   'tools/dockerfile/distribtest/cpp_%s_%s' % (
-                                       self.docker_suffix,
-                                       self.arch),
-                                   'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
-    elif self.platform == 'windows':
-      return create_jobspec(self.name,
-                            ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
-                            environ={},
-                            timeout_seconds=30*60,
-                            use_workspace=True)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' % (
+                    self.docker_suffix, self.arch),
+                'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
+        elif self.platform == 'windows':
+            return create_jobspec(
+                self.name,
+                ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
+                environ={},
+                timeout_seconds=30 * 60,
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 def targets():
-  """Gets list of supported targets"""
-  return [CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
-          CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
-          CppDistribTest('windows', 'x86', testcase='cmake'),
-          CSharpDistribTest('linux', 'x64', 'wheezy'),
-          CSharpDistribTest('linux', 'x64', 'jessie'),
-          CSharpDistribTest('linux', 'x86', 'jessie'),
-          CSharpDistribTest('linux', 'x64', 'centos7'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
-          CSharpDistribTest('macos', 'x86'),
-          CSharpDistribTest('windows', 'x86'),
-          CSharpDistribTest('windows', 'x64'),
-          PythonDistribTest('linux', 'x64', 'wheezy'),
-          PythonDistribTest('linux', 'x64', 'jessie'),
-          PythonDistribTest('linux', 'x86', 'jessie'),
-          PythonDistribTest('linux', 'x64', 'centos6'),
-          PythonDistribTest('linux', 'x64', 'centos7'),
-          PythonDistribTest('linux', 'x64', 'fedora20'),
-          PythonDistribTest('linux', 'x64', 'fedora21'),
-          PythonDistribTest('linux', 'x64', 'fedora22'),
-          PythonDistribTest('linux', 'x64', 'fedora23'),
-          PythonDistribTest('linux', 'x64', 'opensuse'),
-          PythonDistribTest('linux', 'x64', 'arch'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1204'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1404'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1504'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1510'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1604'),
-          RubyDistribTest('linux', 'x64', 'wheezy'),
-          RubyDistribTest('linux', 'x64', 'jessie'),
-          RubyDistribTest('linux', 'x86', 'jessie'),
-          RubyDistribTest('linux', 'x64', 'centos6'),
-          RubyDistribTest('linux', 'x64', 'centos7'),
-          RubyDistribTest('linux', 'x64', 'fedora20'),
-          RubyDistribTest('linux', 'x64', 'fedora21'),
-          RubyDistribTest('linux', 'x64', 'fedora22'),
-          RubyDistribTest('linux', 'x64', 'fedora23'),
-          RubyDistribTest('linux', 'x64', 'opensuse'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1204'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1404'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1504'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1510'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1604'),
-          PHPDistribTest('linux', 'x64', 'jessie'),
-          PHPDistribTest('macos', 'x64'),
-          ]
+    """Gets list of supported targets"""
+    return [
+        CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
+        CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
+        CppDistribTest('windows', 'x86', testcase='cmake'),
+        CSharpDistribTest('linux', 'x64', 'wheezy'),
+        CSharpDistribTest('linux', 'x64', 'jessie'),
+        CSharpDistribTest('linux', 'x86', 'jessie'),
+        CSharpDistribTest('linux', 'x64', 'centos7'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
+        CSharpDistribTest('macos', 'x86'),
+        CSharpDistribTest('windows', 'x86'),
+        CSharpDistribTest('windows', 'x64'),
+        PythonDistribTest('linux', 'x64', 'wheezy'),
+        PythonDistribTest('linux', 'x64', 'jessie'),
+        PythonDistribTest('linux', 'x86', 'jessie'),
+        PythonDistribTest('linux', 'x64', 'centos6'),
+        PythonDistribTest('linux', 'x64', 'centos7'),
+        PythonDistribTest('linux', 'x64', 'fedora20'),
+        PythonDistribTest('linux', 'x64', 'fedora21'),
+        PythonDistribTest('linux', 'x64', 'fedora22'),
+        PythonDistribTest('linux', 'x64', 'fedora23'),
+        PythonDistribTest('linux', 'x64', 'opensuse'),
+        PythonDistribTest('linux', 'x64', 'arch'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1204'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1404'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1504'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1510'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1604'),
+        RubyDistribTest('linux', 'x64', 'wheezy'),
+        RubyDistribTest('linux', 'x64', 'jessie'),
+        RubyDistribTest('linux', 'x86', 'jessie'),
+        RubyDistribTest('linux', 'x64', 'centos6'),
+        RubyDistribTest('linux', 'x64', 'centos7'),
+        RubyDistribTest('linux', 'x64', 'fedora20'),
+        RubyDistribTest('linux', 'x64', 'fedora21'),
+        RubyDistribTest('linux', 'x64', 'fedora22'),
+        RubyDistribTest('linux', 'x64', 'fedora23'),
+        RubyDistribTest('linux', 'x64', 'opensuse'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1204'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1404'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1504'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1510'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1604'),
+        PHPDistribTest('linux', 'x64', 'jessie'),
+        PHPDistribTest('macos', 'x64'),
+    ]
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index ff93bb3..5290845 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets to build distribution packages."""
 
 import os.path
@@ -22,128 +21,137 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
-                'OUTPUT_DIR': 'artifacts'}
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='build_package.%s' % (name),
-          timeout_seconds=30*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+        'OUTPUT_DIR': 'artifacts'
+    }
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='build_package.%s' % (name),
+        timeout_seconds=30 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
-def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
-                   flake_retries=0, timeout_retries=0):
-  """Creates jobspec."""
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          cwd=cwd,
-          shortname='build_package.%s' % (name),
-          timeout_seconds=10*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell)
-  return jobspec
+
+def create_jobspec(name,
+                   cmdline,
+                   environ=None,
+                   cwd=None,
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0):
+    """Creates jobspec."""
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        cwd=cwd,
+        shortname='build_package.%s' % (name),
+        timeout_seconds=10 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell)
+    return jobspec
 
 
 class CSharpPackage:
-  """Builds C# nuget packages."""
+    """Builds C# nuget packages."""
 
-  def __init__(self, linux=False):
-    self.linux = linux
-    self.labels = ['package', 'csharp']
-    if linux:
-      self.name = 'csharp_package_dotnetcli_linux'
-      self.labels += ['linux']
-    else:
-      self.name = 'csharp_package_dotnetcli_windows'
-      self.labels += ['windows']
+    def __init__(self, linux=False):
+        self.linux = linux
+        self.labels = ['package', 'csharp']
+        if linux:
+            self.name = 'csharp_package_dotnetcli_linux'
+            self.labels += ['linux']
+        else:
+            self.name = 'csharp_package_dotnetcli_windows'
+            self.labels += ['windows']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.linux:
-      return create_docker_jobspec(
-          self.name,
-          'tools/dockerfile/test/csharp_jessie_x64',
-          'src/csharp/build_packages_dotnetcli.sh')
-    else:
-      return create_jobspec(self.name,
-                            ['build_packages_dotnetcli.bat'],
-                            cwd='src\\csharp',
-                            shell=True)
+    def build_jobspec(self):
+        if self.linux:
+            return create_docker_jobspec(
+                self.name, 'tools/dockerfile/test/csharp_jessie_x64',
+                'src/csharp/build_packages_dotnetcli.sh')
+        else:
+            return create_jobspec(
+                self.name, ['build_packages_dotnetcli.bat'],
+                cwd='src\\csharp',
+                shell=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class RubyPackage:
-  """Collects ruby gems created in the artifact phase"""
+    """Collects ruby gems created in the artifact phase"""
 
-  def __init__(self):
-    self.name = 'ruby_package'
-    self.labels = ['package', 'ruby', 'linux']
+    def __init__(self):
+        self.name = 'ruby_package'
+        self.labels = ['package', 'ruby', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_ruby.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_ruby.sh')
 
 
 class PythonPackage:
-  """Collects python eggs and wheels created in the artifact phase"""
+    """Collects python eggs and wheels created in the artifact phase"""
 
-  def __init__(self):
-    self.name = 'python_package'
-    self.labels = ['package', 'python', 'linux']
+    def __init__(self):
+        self.name = 'python_package'
+        self.labels = ['package', 'python', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_python.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_python.sh')
 
 
 class PHPPackage:
-  """Copy PHP PECL package artifact"""
+    """Copy PHP PECL package artifact"""
 
-  def __init__(self):
-    self.name = 'php_package'
-    self.labels = ['package', 'php', 'linux']
+    def __init__(self):
+        self.name = 'php_package'
+        self.labels = ['package', 'php', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_php.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_php.sh')
 
 
 def targets():
-  """Gets list of supported targets"""
-  return [CSharpPackage(),
-          CSharpPackage(linux=True),
-          RubyPackage(),
-          PythonPackage(),
-          PHPPackage()]
+    """Gets list of supported targets"""
+    return [
+        CSharpPackage(), CSharpPackage(linux=True), RubyPackage(),
+        PythonPackage(), PHPPackage()
+    ]
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 3958d8e..19f5494 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -590,45 +590,6 @@
   {
     "deps": [
       "gpr", 
-      "grpc"
-    ], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_hpack_tables", 
-    "src": [
-      "tools/codegen/core/gen_hpack_tables.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_legal_metadata_characters", 
-    "src": [
-      "tools/codegen/core/gen_legal_metadata_characters.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_percent_encoding_tables", 
-    "src": [
-      "tools/codegen/core/gen_percent_encoding_tables.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [
-      "gpr", 
       "gpr_test_util", 
       "grpc", 
       "grpc_test_util"
@@ -4337,6 +4298,45 @@
   }, 
   {
     "deps": [
+      "gpr", 
+      "grpc"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_hpack_tables", 
+    "src": [
+      "tools/codegen/core/gen_hpack_tables.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_legal_metadata_characters", 
+    "src": [
+      "tools/codegen/core/gen_legal_metadata_characters.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_percent_encoding_tables", 
+    "src": [
+      "tools/codegen/core/gen_percent_encoding_tables.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
       "boringssl", 
       "boringssl_aes_test_lib", 
       "boringssl_test_util"
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 6ffc7f4..f8985b6 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -48732,6 +48732,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -48758,6 +48784,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -48784,6 +48836,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49620,6 +49698,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49646,6 +49750,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49672,6 +49802,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -50561,6 +50717,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_generic_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -50587,6 +50769,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -50613,6 +50821,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -51409,6 +51643,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_secure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -51448,6 +51721,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -51487,6 +51799,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_secure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -52739,6 +53090,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_insecure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -52778,6 +53168,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -52817,6 +53246,45 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "asan-noleaks", 
+      "asan-trace-cmp", 
+      "basicprof", 
+      "c++-compat", 
+      "counters", 
+      "dbg", 
+      "gcov", 
+      "helgrind", 
+      "lto", 
+      "memcheck", 
+      "msan", 
+      "mutrace", 
+      "opt", 
+      "stapprof", 
+      "ubsan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure_low_thread_count", 
+    "timeout_seconds": 600
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
diff --git a/tools/run_tests/performance/OWNERS b/tools/run_tests/performance/OWNERS
new file mode 100644
index 0000000..98c8152
--- /dev/null
+++ b/tools/run_tests/performance/OWNERS
@@ -0,0 +1,9 @@
+set noparent
+
+# These owners are in place to ensure that scenario_result_schema.json is not
+# modified without also running tools/run_tests/performance/patch_scenario_results_schema.py
+# to update the BigQuery schema
+
+@ncteisen
+@matt-kwong
+@ctiller
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index 31819d6..6702587 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -26,146 +26,161 @@
 import uuid
 import massage_qps_stats
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
-
-_PROJECT_ID='grpc-testing'
+_PROJECT_ID = 'grpc-testing'
 
 
 def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
-  with open(result_file, 'r') as f:
-    (col1, col2, col3) = f.read().split(',')
-    latency50 = float(col1.strip()) * 1000
-    latency90 = float(col2.strip()) * 1000
-    latency99 = float(col3.strip()) * 1000
+    with open(result_file, 'r') as f:
+        (col1, col2, col3) = f.read().split(',')
+        latency50 = float(col1.strip()) * 1000
+        latency90 = float(col2.strip()) * 1000
+        latency99 = float(col3.strip()) * 1000
 
-    scenario_result = {
-        'scenario': {
-          'name': 'netperf_tcp_rr'
-        },
-        'summary': {
-          'latency50': latency50,
-          'latency90': latency90,
-          'latency99': latency99
+        scenario_result = {
+            'scenario': {
+                'name': 'netperf_tcp_rr'
+            },
+            'summary': {
+                'latency50': latency50,
+                'latency90': latency90,
+                'latency99': latency99
+            }
         }
-    }
 
-  bq = big_query_utils.create_big_query()
-  _create_results_table(bq, dataset_id, table_id)
+    bq = big_query_utils.create_big_query()
+    _create_results_table(bq, dataset_id, table_id)
 
-  if not _insert_result(bq, dataset_id, table_id, scenario_result, flatten=False):
-    print('Error uploading result to bigquery.')
-    sys.exit(1)
+    if not _insert_result(
+            bq, dataset_id, table_id, scenario_result, flatten=False):
+        print('Error uploading result to BigQuery.')
+        sys.exit(1)
 
 
 def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
-  with open(result_file, 'r') as f:
-    scenario_result = json.loads(f.read())
+    with open(result_file, 'r') as f:
+        scenario_result = json.loads(f.read())
 
-  bq = big_query_utils.create_big_query()
-  _create_results_table(bq, dataset_id, table_id)
+    bq = big_query_utils.create_big_query()
+    _create_results_table(bq, dataset_id, table_id)
 
-  if not _insert_result(bq, dataset_id, table_id, scenario_result):
-    print('Error uploading result to bigquery.')
-    sys.exit(1)
+    if not _insert_result(bq, dataset_id, table_id, scenario_result):
+        print('Error uploading result to BigQuery.')
+        sys.exit(1)
 
 
 def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
-  if flatten:
-    _flatten_result_inplace(scenario_result)
-  _populate_metadata_inplace(scenario_result)
-  row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
-  return big_query_utils.insert_rows(bq,
-                                     _PROJECT_ID,
-                                     dataset_id,
-                                     table_id,
-                                     [row])
+    if flatten:
+        _flatten_result_inplace(scenario_result)
+    _populate_metadata_inplace(scenario_result)
+    row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
+    return big_query_utils.insert_rows(bq, _PROJECT_ID, dataset_id, table_id,
+                                       [row])
 
 
 def _create_results_table(bq, dataset_id, table_id):
-  with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
-    table_schema = json.loads(f.read())
-  desc = 'Results of performance benchmarks.'
-  return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id,
-                               table_id, table_schema, desc)
+    with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+              'r') as f:
+        table_schema = json.loads(f.read())
+    desc = 'Results of performance benchmarks.'
+    return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id, table_id,
+                                         table_schema, desc)
 
 
 def _flatten_result_inplace(scenario_result):
-  """Bigquery is not really great for handling deeply nested data
+    """Bigquery is not really great for handling deeply nested data
   and repeated fields. To maintain values of some fields while keeping
   the schema relatively simple, we artificially leave some of the fields
   as JSON strings.
   """
-  scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
-  scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
-  scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
-  scenario_result['serverCpuStats'] = []
-  for stats in scenario_result['serverStats']:
-    scenario_result['serverCpuStats'].append(dict())
-    scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
-    scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
-  for stats in scenario_result['clientStats']:
-    stats['latencies'] = json.dumps(stats['latencies'])
-    stats.pop('requestResults', None)
-  scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
-  scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
-  scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
-  scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
-  scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
-  scenario_result['summary'].pop('successfulRequestsPerSecond', None)
-  scenario_result['summary'].pop('failedRequestsPerSecond', None)
-  massage_qps_stats.massage_qps_stats(scenario_result)
+    scenario_result['scenario']['clientConfig'] = json.dumps(
+        scenario_result['scenario']['clientConfig'])
+    scenario_result['scenario']['serverConfig'] = json.dumps(
+        scenario_result['scenario']['serverConfig'])
+    scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+    scenario_result['serverCpuStats'] = []
+    for stats in scenario_result['serverStats']:
+        scenario_result['serverCpuStats'].append(dict())
+        scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop(
+            'totalCpuTime', None)
+        scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop(
+            'idleCpuTime', None)
+    for stats in scenario_result['clientStats']:
+        stats['latencies'] = json.dumps(stats['latencies'])
+        stats.pop('requestResults', None)
+    scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+    scenario_result['clientSuccess'] = json.dumps(
+        scenario_result['clientSuccess'])
+    scenario_result['serverSuccess'] = json.dumps(
+        scenario_result['serverSuccess'])
+    scenario_result['requestResults'] = json.dumps(
+        scenario_result.get('requestResults', []))
+    scenario_result['serverCpuUsage'] = scenario_result['summary'].pop(
+        'serverCpuUsage', None)
+    scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+    scenario_result['summary'].pop('failedRequestsPerSecond', None)
+    massage_qps_stats.massage_qps_stats(scenario_result)
 
 
 def _populate_metadata_inplace(scenario_result):
-  """Populates metadata based on environment variables set by Jenkins."""
-  # NOTE: Grabbing the Jenkins environment variables will only work if the
-  # driver is running locally on the same machine where Jenkins has started
-  # the job. For our setup, this is currently the case, so just assume that.
-  build_number = os.getenv('BUILD_NUMBER')
-  build_url = os.getenv('BUILD_URL')
-  job_name = os.getenv('JOB_NAME')
-  git_commit = os.getenv('GIT_COMMIT')
-  # actual commit is the actual head of PR that is getting tested
-  git_actual_commit = os.getenv('ghprbActualCommit')
+    """Populates metadata based on environment variables set by Jenkins."""
+    # NOTE: Grabbing the Jenkins environment variables will only work if the
+    # driver is running locally on the same machine where Jenkins has started
+    # the job. For our setup, this is currently the case, so just assume that.
+    build_number = os.getenv('BUILD_NUMBER')
+    build_url = os.getenv('BUILD_URL')
+    job_name = os.getenv('JOB_NAME')
+    git_commit = os.getenv('GIT_COMMIT')
+    # The actual commit is the head of the PR being tested
+    git_actual_commit = os.getenv('ghprbActualCommit')
 
-  utc_timestamp = str(calendar.timegm(time.gmtime()))
-  metadata = {'created': utc_timestamp}
+    utc_timestamp = str(calendar.timegm(time.gmtime()))
+    metadata = {'created': utc_timestamp}
 
-  if build_number:
-    metadata['buildNumber'] = build_number
-  if build_url:
-    metadata['buildUrl'] = build_url
-  if job_name:
-    metadata['jobName'] = job_name
-  if git_commit:
-    metadata['gitCommit'] = git_commit
-  if git_actual_commit:
-    metadata['gitActualCommit'] = git_actual_commit
+    if build_number:
+        metadata['buildNumber'] = build_number
+    if build_url:
+        metadata['buildUrl'] = build_url
+    if job_name:
+        metadata['jobName'] = job_name
+    if git_commit:
+        metadata['gitCommit'] = git_commit
+    if git_actual_commit:
+        metadata['gitActualCommit'] = git_actual_commit
 
-  scenario_result['metadata'] = metadata
+    scenario_result['metadata'] = metadata
 
 
 argp = argparse.ArgumentParser(description='Upload result to big query.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
-                  help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--file_to_upload', default='scenario_result.json', type=str,
-                  help='Report file to upload.')
-argp.add_argument('--file_format',
-                  choices=['scenario_result','netperf_latency_csv'],
-                  default='scenario_result',
-                  help='Format of the file to upload.')
+argp.add_argument(
+    '--bq_result_table',
+    required=True,
+    default=None,
+    type=str,
+    help='BigQuery "dataset.table" to upload results to.')
+argp.add_argument(
+    '--file_to_upload',
+    default='scenario_result.json',
+    type=str,
+    help='Report file to upload.')
+argp.add_argument(
+    '--file_format',
+    choices=['scenario_result', 'netperf_latency_csv'],
+    default='scenario_result',
+    help='Format of the file to upload.')
 
 args = argp.parse_args()
 
 dataset_id, table_id = args.bq_result_table.split('.', 2)
 
 if args.file_format == 'netperf_latency_csv':
-  _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, args.file_to_upload)
+    _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id,
+                                            args.file_to_upload)
 else:
-  _upload_scenario_result_to_bigquery(dataset_id, table_id, args.file_to_upload)
+    _upload_scenario_result_to_bigquery(dataset_id, table_id,
+                                        args.file_to_upload)
 print('Successfully uploaded %s to BigQuery.\n' % args.file_to_upload)
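The _flatten_result_inplace helper above works around BigQuery's limited support for deeply nested and repeated fields by serializing selected sub-objects to JSON strings, so each uploaded row fits a mostly flat schema. A minimal sketch of that idea, using hypothetical field names rather than the real scenario_result layout:

    import json

    def flatten_nested_fields(row, nested_keys):
        # Replace each nested value with its JSON-encoded string so the row
        # matches a flat BigQuery schema; json.loads() recovers the structure.
        for key in nested_keys:
            if key in row:
                row[key] = json.dumps(row[key])
        return row

    row = {'scenario': {'name': 'demo'}, 'latencies': [1.5, 2.0], 'qps': 1000.0}
    flatten_nested_fields(row, ['scenario', 'latencies'])
    # row == {'scenario': '{"name": "demo"}', 'latencies': '[1.5, 2.0]', 'qps': 1000.0}

For reference, the script's entry point shown above takes --bq_result_table in "dataset.table" form, while --file_format selects between the scenario-result JSON path and the netperf latency CSV path.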
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 48c5758..37f6e7a 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -15,182 +15,456 @@
 # Autogenerated by tools/codegen/core/gen_stats_data.py
 
 import massage_qps_stats_helpers
+
+
 def massage_qps_stats(scenario_result):
-  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
-    if "coreStats" not in stats: return
-    core_stats = stats["coreStats"]
-    del stats["coreStats"]
-    stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "client_calls_created")
-    stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "server_calls_created")
-    stats["core_cqs_created"] = massage_qps_stats_helpers.counter(core_stats, "cqs_created")
-    stats["core_client_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_channels_created")
-    stats["core_client_subchannels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_subchannels_created")
-    stats["core_server_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "server_channels_created")
-    stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(core_stats, "syscall_poll")
-    stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(core_stats, "syscall_wait")
-    stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick")
-    stats["core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_without_poller")
-    stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_again")
-    stats["core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_fd")
-    stats["core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_cv")
-    stats["core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_own_thread")
-    stats["core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(core_stats, "histogram_slow_lookups")
-    stats["core_syscall_write"] = massage_qps_stats_helpers.counter(core_stats, "syscall_write")
-    stats["core_syscall_read"] = massage_qps_stats_helpers.counter(core_stats, "syscall_read")
-    stats["core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_pollers_created")
-    stats["core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_poller_polls")
-    stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_batches")
-    stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_cancel")
-    stats["core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_initial_metadata")
-    stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_message")
-    stats["core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_trailing_metadata")
-    stats["core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_initial_metadata")
-    stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_message")
-    stats["core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_trailing_metadata")
-    stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_settings_writes")
-    stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_pings_sent")
-    stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_begun")
-    stats["core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_offloaded")
-    stats["core_http2_writes_continued"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_continued")
-    stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_partial_writes")
-    stats["core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_initial_write")
-    stats["core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_start_new_stream")
-    stats["core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_message")
-    stats["core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_initial_metadata")
-    stats["core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_trailing_metadata")
-    stats["core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_retry_send_ping")
-    stats["core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_continue_pings")
-    stats["core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_goaway_sent")
-    stats["core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_rst_stream")
-    stats["core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_close_from_api")
-    stats["core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_stream_flow_control")
-    stats["core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control")
-    stats["core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_settings")
-    stats["core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
-    stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
-    stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_update")
-    stats["core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_application_ping")
-    stats["core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_keepalive_ping")
-    stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled")
-    stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response")
-    stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream")
-    stats["core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_spurious_writes_begun")
-    stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_indexed")
-    stats["core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx")
-    stats["core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx_v")
-    stats["core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx")
-    stats["core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx_v")
-    stats["core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx")
-    stats["core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx_v")
-    stats["core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_uncompressed")
-    stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_huffman")
-    stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary")
-    stats["core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary_base64")
-    stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_indexed")
-    stats["core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx")
-    stats["core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx_v")
-    stats["core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx")
-    stats["core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx_v")
-    stats["core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx")
-    stats["core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx_v")
-    stats["core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_uncompressed")
-    stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_huffman")
-    stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary")
-    stats["core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary_base64")
-    stats["core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_initiated")
-    stats["core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_items")
-    stats["core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_final_items")
-    stats["core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_offloaded")
-    stats["core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_initiated")
-    stats["core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_scheduled_items")
-    stats["core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_set_notify_on_cancel")
-    stats["core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_cancelled")
-    stats["core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_short_items")
-    stats["core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_long_items")
-    stats["core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_to_self")
-    stats["core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(core_stats, "executor_wakeup_initiated")
-    stats["core_executor_queue_drained"] = massage_qps_stats_helpers.counter(core_stats, "executor_queue_drained")
-    stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(core_stats, "executor_push_retries")
-    stats["core_server_requested_calls"] = massage_qps_stats_helpers.counter(core_stats, "server_requested_calls")
-    stats["core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(core_stats, "server_slowpath_requests_queued")
-    stats["core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_failures")
-    stats["core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_successes")
-    stats["core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_transient_pop_failures")
-    h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
-    stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_call_initial_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "poll_events_returned")
-    stats["core_poll_events_returned"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_poll_events_returned_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
-    stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_write_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_iov_size")
-    stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
-    stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
-    stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer_iov_size")
-    stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_offer_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_size")
-    stats["core_http2_send_message_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_message_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_initial_metadata_per_write")
-    stats["core_http2_send_initial_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_per_write")
-    stats["core_http2_send_message_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_message_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_trailing_metadata_per_write")
-    stats["core_http2_send_trailing_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_flowctl_per_write")
-    stats["core_http2_send_flowctl_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_flowctl_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "server_cqs_checked")
-    stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
+    for stats in scenario_result["serverStats"] + scenario_result[
+            "clientStats"]:
+        if "coreStats" not in stats: return
+        core_stats = stats["coreStats"]
+        del stats["coreStats"]
+        stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "client_calls_created")
+        stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "server_calls_created")
+        stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "cqs_created")
+        stats[
+            "core_client_channels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "client_channels_created")
+        stats[
+            "core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "client_subchannels_created")
+        stats[
+            "core_server_channels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_channels_created")
+        stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_poll")
+        stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_wait")
+        stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
+            core_stats, "pollset_kick")
+        stats[
+            "core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kicked_without_poller")
+        stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
+            core_stats, "pollset_kicked_again")
+        stats[
+            "core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_wakeup_fd")
+        stats[
+            "core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_wakeup_cv")
+        stats[
+            "core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_own_thread")
+        stats[
+            "core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
+                core_stats, "histogram_slow_lookups")
+        stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_write")
+        stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_read")
+        stats[
+            "core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "tcp_backup_pollers_created")
+        stats[
+            "core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
+                core_stats, "tcp_backup_poller_polls")
+        stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_batches")
+        stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_cancel")
+        stats[
+            "core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_send_initial_metadata")
+        stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_send_message")
+        stats[
+            "core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_send_trailing_metadata")
+        stats[
+            "core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_recv_initial_metadata")
+        stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_recv_message")
+        stats[
+            "core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_recv_trailing_metadata")
+        stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_settings_writes")
+        stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_pings_sent")
+        stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_writes_begun")
+        stats[
+            "core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_writes_offloaded")
+        stats[
+            "core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_writes_continued")
+        stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_partial_writes")
+        stats[
+            "core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_initial_write")
+        stats[
+            "core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_start_new_stream")
+        stats[
+            "core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_message")
+        stats[
+            "core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_initial_metadata")
+        stats[
+            "core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_send_trailing_metadata")
+        stats[
+            "core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_retry_send_ping")
+        stats[
+            "core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_continue_pings")
+        stats[
+            "core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_goaway_sent")
+        stats[
+            "core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_rst_stream")
+        stats[
+            "core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_close_from_api")
+        stats[
+            "core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_stream_flow_control")
+        stats[
+            "core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_transport_flow_control")
+        stats[
+            "core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_settings")
+        stats[
+            "core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
+        stats[
+            "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
+        stats[
+            "core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_flow_control_unstalled_by_update")
+        stats[
+            "core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_application_ping")
+        stats[
+            "core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_keepalive_ping")
+        stats[
+            "core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_transport_flow_control_unstalled")
+        stats[
+            "core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_ping_response")
+        stats[
+            "core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_force_rst_stream")
+        stats[
+            "core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_spurious_writes_begun")
+        stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_indexed")
+        stats[
+            "core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_incidx")
+        stats[
+            "core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_incidx_v")
+        stats[
+            "core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_notidx")
+        stats[
+            "core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_notidx_v")
+        stats[
+            "core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_nvridx")
+        stats[
+            "core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_nvridx_v")
+        stats[
+            "core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_uncompressed")
+        stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_huffman")
+        stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_binary")
+        stats[
+            "core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_binary_base64")
+        stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_indexed")
+        stats[
+            "core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_incidx")
+        stats[
+            "core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_incidx_v")
+        stats[
+            "core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_notidx")
+        stats[
+            "core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_notidx_v")
+        stats[
+            "core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_nvridx")
+        stats[
+            "core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_nvridx_v")
+        stats[
+            "core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_uncompressed")
+        stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_huffman")
+        stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_binary")
+        stats[
+            "core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_binary_base64")
+        stats[
+            "core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_initiated")
+        stats[
+            "core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_scheduled_items")
+        stats[
+            "core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_scheduled_final_items")
+        stats[
+            "core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_offloaded")
+        stats[
+            "core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_locks_initiated")
+        stats[
+            "core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_locks_scheduled_items")
+        stats[
+            "core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_set_notify_on_cancel")
+        stats[
+            "core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_cancelled")
+        stats[
+            "core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_short_items")
+        stats[
+            "core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_long_items")
+        stats[
+            "core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_to_self")
+        stats[
+            "core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_wakeup_initiated")
+        stats[
+            "core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_queue_drained")
+        stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(
+            core_stats, "executor_push_retries")
+        stats[
+            "core_server_requested_calls"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_requested_calls")
+        stats[
+            "core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_slowpath_requests_queued")
+        stats[
+            "core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_trylock_failures")
+        stats[
+            "core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_trylock_successes")
+        stats[
+            "core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_transient_pop_failures")
+        h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
+        stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_call_initial_size_bkts"] = ",".join("%f" % x
+                                                        for x in h.boundaries)
+        stats[
+            "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "poll_events_returned")
+        stats["core_poll_events_returned"] = ",".join("%f" % x
+                                                      for x in h.buckets)
+        stats["core_poll_events_returned_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
+        stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_write_size_bkts"] = ",".join("%f" % x
+                                                     for x in h.boundaries)
+        stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "tcp_write_iov_size")
+        stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x
+                                                         for x in h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
+        stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_read_size_bkts"] = ",".join("%f" % x
+                                                    for x in h.boundaries)
+        stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
+        stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x
+                                                     for x in h.boundaries)
+        stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "tcp_read_offer_iov_size")
+        stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x
+                                                         for x in h.buckets)
+        stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_message_size")
+        stats["core_http2_send_message_size"] = ",".join("%f" % x
+                                                         for x in h.buckets)
+        stats["core_http2_send_message_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(
+            core_stats, "http2_send_initial_metadata_per_write")
+        stats["core_http2_send_initial_metadata_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_message_per_write")
+        stats["core_http2_send_message_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_message_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(
+            core_stats, "http2_send_trailing_metadata_per_write")
+        stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_flowctl_per_write")
+        stats["core_http2_send_flowctl_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "server_cqs_checked")
+        stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x
+                                                         for x in h.boundaries)
+        stats[
+            "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
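
Editor's note: every entry added to massage_qps_stats.py above follows one mechanical pattern per core histogram: store the comma-joined bucket counts, the comma-joined bucket boundaries, and the 50th/95th/99th percentiles. A condensed sketch of that pattern, using a hypothetical helper name (the generated file deliberately inlines this body for each metric rather than factoring it out):

    def _add_core_histogram(stats, core_stats, name):
        # Hypothetical refactoring sketch only; not part of the diff above.
        h = massage_qps_stats_helpers.histogram(core_stats, name)
        stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
        stats["core_" + name + "_bkts"] = ",".join("%f" % x for x in h.boundaries)
        for pctl in (50, 95, 99):
            stats["core_%s_%dp" % (name, pctl)] = massage_qps_stats_helpers.percentile(
                h.buckets, pctl, h.boundaries)
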
diff --git a/tools/run_tests/performance/massage_qps_stats_helpers.py b/tools/run_tests/performance/massage_qps_stats_helpers.py
index a2fe4ae..108451c 100644
--- a/tools/run_tests/performance/massage_qps_stats_helpers.py
+++ b/tools/run_tests/performance/massage_qps_stats_helpers.py
@@ -14,44 +14,49 @@
 
 import collections
 
+
 def _threshold_for_count_below(buckets, boundaries, count_below):
-  count_so_far = 0
-  for lower_idx in range(0, len(buckets)):
-    count_so_far += buckets[lower_idx]
-    if count_so_far >= count_below:
-      break
-  if count_so_far == count_below:
-    # this bucket hits the threshold exactly... we should be midway through
-    # any run of zero values following the bucket
-    for upper_idx in range(lower_idx + 1, len(buckets)):
-      if buckets[upper_idx] != 0:
-        break
-    return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
-  else:
-    # treat values as uniform throughout the bucket, and find where this value
-    # should lie
-    lower_bound = boundaries[lower_idx]
-    upper_bound = boundaries[lower_idx + 1]
-    return (upper_bound -
-           (upper_bound - lower_bound) * (count_so_far - count_below) /
-               float(buckets[lower_idx]))
+    count_so_far = 0
+    for lower_idx in range(0, len(buckets)):
+        count_so_far += buckets[lower_idx]
+        if count_so_far >= count_below:
+            break
+    if count_so_far == count_below:
+        # this bucket hits the threshold exactly... we should be midway through
+        # any run of zero values following the bucket
+        for upper_idx in range(lower_idx + 1, len(buckets)):
+            if buckets[upper_idx] != 0:
+                break
+        return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
+    else:
+        # treat values as uniform throughout the bucket, and find where this value
+        # should lie
+        lower_bound = boundaries[lower_idx]
+        upper_bound = boundaries[lower_idx + 1]
+        return (upper_bound - (upper_bound - lower_bound) *
+                (count_so_far - count_below) / float(buckets[lower_idx]))
+
 
 def percentile(buckets, pctl, boundaries):
-  return _threshold_for_count_below(
-      buckets, boundaries, sum(buckets) * pctl / 100.0)
+    return _threshold_for_count_below(buckets, boundaries,
+                                      sum(buckets) * pctl / 100.0)
+
 
 def counter(core_stats, name):
-  for stat in core_stats['metrics']:
-    if stat['name'] == name:
-      return int(stat.get('count', 0))
+    for stat in core_stats['metrics']:
+        if stat['name'] == name:
+            return int(stat.get('count', 0))
+
 
 Histogram = collections.namedtuple('Histogram', 'buckets boundaries')
+
+
 def histogram(core_stats, name):
-  for stat in core_stats['metrics']:
-    if stat['name'] == name:
-      buckets = []
-      boundaries = []
-      for b in stat['histogram']['buckets']:
-        buckets.append(int(b.get('count', 0)))
-        boundaries.append(int(b.get('start', 0)))
-  return Histogram(buckets=buckets, boundaries=boundaries)
+    for stat in core_stats['metrics']:
+        if stat['name'] == name:
+            buckets = []
+            boundaries = []
+            for b in stat['histogram']['buckets']:
+                buckets.append(int(b.get('count', 0)))
+                boundaries.append(int(b.get('start', 0)))
+    return Histogram(buckets=buckets, boundaries=boundaries)
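
Editor's note: a minimal usage sketch of the helpers above, with made-up metric values shaped like the core-stats JSON the massage script consumes. Note that histogram() scans every metric without breaking on a match and will raise a NameError if the requested name is absent, so callers are expected to ask only for names present in the snapshot.

    core_stats = {
        'metrics': [{
            'name': 'tcp_write_size',
            'histogram': {
                'buckets': [
                    {'start': 0, 'count': 0},
                    {'start': 64, 'count': 10},
                    {'start': 256, 'count': 10},
                ]
            },
        }]
    }

    h = histogram(core_stats, 'tcp_write_size')
    # h == Histogram(buckets=[0, 10, 10], boundaries=[0, 64, 256])

    # The cumulative count reaches exactly half of the 20 samples in the bucket
    # starting at 64, so _threshold_for_count_below takes the exact-threshold
    # branch and returns the midpoint (64 + 256) / 2.0 == 160.0.
    p50 = percentile(h.buckets, 50, h.boundaries)
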
diff --git a/tools/run_tests/performance/patch_scenario_results_schema.py b/tools/run_tests/performance/patch_scenario_results_schema.py
index 81ba538..2a2aadc 100755
--- a/tools/run_tests/performance/patch_scenario_results_schema.py
+++ b/tools/run_tests/performance/patch_scenario_results_schema.py
@@ -25,27 +25,32 @@
 import time
 import uuid
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
+_PROJECT_ID = 'grpc-testing'
 
-_PROJECT_ID='grpc-testing'
 
 def _patch_results_table(dataset_id, table_id):
-  bq = big_query_utils.create_big_query()
-  with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
-    table_schema = json.loads(f.read())
-  desc = 'Results of performance benchmarks.'
-  return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id,
-                                     table_id, table_schema)
+    bq = big_query_utils.create_big_query()
+    with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+              'r') as f:
+        table_schema = json.loads(f.read())
+    desc = 'Results of performance benchmarks.'
+    return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id, table_id,
+                                       table_schema)
 
 
-argp = argparse.ArgumentParser(description='Patch schema of scenario results table.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
-                  help='Bigquery "dataset.table" to patch.')
+argp = argparse.ArgumentParser(
+    description='Patch schema of scenario results table.')
+argp.add_argument(
+    '--bq_result_table',
+    required=True,
+    default=None,
+    type=str,
+    help='BigQuery "dataset.table" to patch.')
 
 args = argp.parse_args()
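
Editor's note: the remainder of this script falls outside the hunk, but it presumably splits the --bq_result_table value and hands it to _patch_results_table (also note the desc local above is currently not passed to patch_table). A hedged sketch of that call, with a hypothetical table name:

    # Hedged sketch only; the rest of the file is not shown in this hunk.
    dataset_id, table_id = args.bq_result_table.split('.', 1)
    _patch_results_table(dataset_id, table_id)
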
 
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index cafac3d..7af33f9 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -16,66 +16,64 @@
 
 import math
 
-WARMUP_SECONDS=5
-JAVA_WARMUP_SECONDS=15  # Java needs more warmup time for JIT to kick in.
-BENCHMARK_SECONDS=30
+WARMUP_SECONDS = 5
+JAVA_WARMUP_SECONDS = 15  # Java needs more warmup time for JIT to kick in.
+BENCHMARK_SECONDS = 30
 
-SMOKETEST='smoketest'
-SCALABLE='scalable'
-INPROC='inproc'
-SWEEP='sweep'
-DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST]
+SMOKETEST = 'smoketest'
+SCALABLE = 'scalable'
+INPROC = 'inproc'
+SWEEP = 'sweep'
+DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST]
 
-SECURE_SECARGS = {'use_test_ca': True,
-                  'server_host_override': 'foo.test.google.fr'}
+SECURE_SECARGS = {
+    'use_test_ca': True,
+    'server_host_override': 'foo.test.google.fr'
+}
 
 HISTOGRAM_PARAMS = {
-  'resolution': 0.01,
-  'max_possible': 60e9,
+    'resolution': 0.01,
+    'max_possible': 60e9,
 }
 
 # target number of RPCs outstanding across all client channels in
 # non-ping-pong tests (since we can only specify per-channel numbers, the
 # actual target will be slightly higher)
-OUTSTANDING_REQUESTS={
-    'async': 6400,
-    'async-limited': 800,
-    'sync': 1000
-}
+OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
 
 # wide is the number of client channels in multi-channel tests (1 otherwise)
-WIDE=64
+WIDE = 64
 
 
 def _get_secargs(is_secure):
-  if is_secure:
-    return SECURE_SECARGS
-  else:
-    return None
+    if is_secure:
+        return SECURE_SECARGS
+    else:
+        return None
 
 
 def remove_nonproto_fields(scenario):
-  """Remove special-purpose that contains some extra info about the scenario
+    """Remove special-purpose that contains some extra info about the scenario
   but don't belong to the ScenarioConfig protobuf message"""
-  scenario.pop('CATEGORIES', None)
-  scenario.pop('CLIENT_LANGUAGE', None)
-  scenario.pop('SERVER_LANGUAGE', None)
-  scenario.pop('EXCLUDED_POLL_ENGINES', None)
-  return scenario
+    scenario.pop('CATEGORIES', None)
+    scenario.pop('CLIENT_LANGUAGE', None)
+    scenario.pop('SERVER_LANGUAGE', None)
+    scenario.pop('EXCLUDED_POLL_ENGINES', None)
+    return scenario
 
 
 def geometric_progression(start, stop, step):
-  n = start
-  while n < stop:
-    yield int(round(n))
-    n *= step
+    n = start
+    while n < stop:
+        yield int(round(n))
+        n *= step
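
Editor's note: geometric_progression drives the messages-per-stream, payload-size, and channel sweeps further down in this file; a quick check of what it yields for the step values used there (results follow directly from the generator's code):

    list(geometric_progression(1, 20, 10))              # [1, 10]
    list(geometric_progression(1, 200, math.sqrt(10)))  # [1, 3, 10, 32, 100]
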
 
 
 def _payload_type(use_generic_payload, req_size, resp_size):
     r = {}
     sizes = {
-      'req_size': req_size,
-      'resp_size': resp_size,
+        'req_size': req_size,
+        'resp_size': resp_size,
     }
     if use_generic_payload:
         r['bytebuf_params'] = sizes
@@ -83,6 +81,7 @@
         r['simple_params'] = sizes
     return r
 
+
 def _load_params(offered_load):
     r = {}
     if offered_load is None:
@@ -93,21 +92,25 @@
         r['poisson'] = load
     return r
 
-def _add_channel_arg(config, key, value):
-  if 'channel_args' in config:
-    channel_args = config['channel_args']
-  else:
-    channel_args = []
-    config['channel_args'] = channel_args
-  arg = {'name': key}
-  if isinstance(value, int):
-    arg['int_value'] = value
-  else:
-    arg['str_value'] = value
-  channel_args.append(arg)
 
-def _ping_pong_scenario(name, rpc_type,
-                        client_type, server_type,
+def _add_channel_arg(config, key, value):
+    if 'channel_args' in config:
+        channel_args = config['channel_args']
+    else:
+        channel_args = []
+        config['channel_args'] = channel_args
+    arg = {'name': key}
+    if isinstance(value, int):
+        arg['int_value'] = value
+    else:
+        arg['str_value'] = value
+    channel_args.append(arg)
+
+
+def _ping_pong_scenario(name,
+                        rpc_type,
+                        client_type,
+                        server_type,
                         secure=True,
                         use_generic_payload=False,
                         req_size=0,
@@ -128,824 +131,1033 @@
                         excluded_poll_engines=[],
                         minimal_stack=False,
                         offered_load=None):
-  """Creates a basic ping pong scenario."""
-  scenario = {
-    'name': name,
-    'num_servers': 1,
-    'num_clients': 1,
-    'client_config': {
-      'client_type': client_type,
-      'security_params': _get_secargs(secure),
-      'outstanding_rpcs_per_channel': 1,
-      'client_channels': 1,
-      'async_client_threads': 1,
-      'threads_per_cq': client_threads_per_cq,
-      'rpc_type': rpc_type,
-      'histogram_params': HISTOGRAM_PARAMS,
-      'channel_args': [],
-    },
-    'server_config': {
-      'server_type': server_type,
-      'security_params': _get_secargs(secure),
-      'async_server_threads': async_server_threads,
-      'threads_per_cq': server_threads_per_cq,
-      'channel_args': [],
-    },
-    'warmup_seconds': warmup_seconds,
-    'benchmark_seconds': BENCHMARK_SECONDS
-  }
-  if resource_quota_size:
-    scenario['server_config']['resource_quota_size'] = resource_quota_size
-  if use_generic_payload:
-    if server_type != 'ASYNC_GENERIC_SERVER':
-      raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
-    scenario['server_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
+    """Creates a basic ping pong scenario."""
+    scenario = {
+        'name': name,
+        'num_servers': 1,
+        'num_clients': 1,
+        'client_config': {
+            'client_type': client_type,
+            'security_params': _get_secargs(secure),
+            'outstanding_rpcs_per_channel': 1,
+            'client_channels': 1,
+            'async_client_threads': 1,
+            'threads_per_cq': client_threads_per_cq,
+            'rpc_type': rpc_type,
+            'histogram_params': HISTOGRAM_PARAMS,
+            'channel_args': [],
+        },
+        'server_config': {
+            'server_type': server_type,
+            'security_params': _get_secargs(secure),
+            'async_server_threads': async_server_threads,
+            'threads_per_cq': server_threads_per_cq,
+            'channel_args': [],
+        },
+        'warmup_seconds': warmup_seconds,
+        'benchmark_seconds': BENCHMARK_SECONDS
+    }
+    if resource_quota_size:
+        scenario['server_config']['resource_quota_size'] = resource_quota_size
+    if use_generic_payload:
+        if server_type != 'ASYNC_GENERIC_SERVER':
+            raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
+        scenario['server_config']['payload_config'] = _payload_type(
+            use_generic_payload, req_size, resp_size)
 
-  scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
+    scenario['client_config']['payload_config'] = _payload_type(
+        use_generic_payload, req_size, resp_size)
 
-  # Optimization target of 'throughput' does not work well with epoll1 polling
-  # engine. Use the default value of 'blend'
-  optimization_target = 'throughput'
+    # Optimization target of 'throughput' does not work well with epoll1 polling
+    # engine. Use the default value of 'blend'
+    optimization_target = 'throughput'
 
-  if unconstrained_client:
-    outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
-    # clamp buffer usage to something reasonable (16 gig for now)
-    MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
-    if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
-        outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size, resp_size))
-    wide = channels if channels is not None else WIDE
-    deep = int(math.ceil(1.0 * outstanding_calls / wide))
+    if unconstrained_client:
+        outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
+            unconstrained_client]
+        # clamp buffer usage to something reasonable (16 gig for now)
+        MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
+        if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
+            outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size,
+                                                            resp_size))
+        wide = channels if channels is not None else WIDE
+        deep = int(math.ceil(1.0 * outstanding_calls / wide))
 
-    scenario['num_clients'] = num_clients if num_clients is not None else 0  # use as many clients as available.
-    scenario['client_config']['outstanding_rpcs_per_channel'] = deep
-    scenario['client_config']['client_channels'] = wide
-    scenario['client_config']['async_client_threads'] = 0
-    if offered_load is not None:
+        scenario[
+            'num_clients'] = num_clients if num_clients is not None else 0  # use as many clients as available.
+        scenario['client_config']['outstanding_rpcs_per_channel'] = deep
+        scenario['client_config']['client_channels'] = wide
+        scenario['client_config']['async_client_threads'] = 0
+        if offered_load is not None:
+            optimization_target = 'latency'
+    else:
+        scenario['client_config']['outstanding_rpcs_per_channel'] = 1
+        scenario['client_config']['client_channels'] = 1
+        scenario['client_config']['async_client_threads'] = 1
         optimization_target = 'latency'
-  else:
-    scenario['client_config']['outstanding_rpcs_per_channel'] = 1
-    scenario['client_config']['client_channels'] = 1
-    scenario['client_config']['async_client_threads'] = 1
-    optimization_target = 'latency'
 
-  scenario['client_config']['load_params'] = _load_params(offered_load)
+    scenario['client_config']['load_params'] = _load_params(offered_load)
 
-  optimization_channel_arg = {
-    'name': 'grpc.optimization_target',
-    'str_value': optimization_target
-  }
-  scenario['client_config']['channel_args'].append(optimization_channel_arg)
-  scenario['server_config']['channel_args'].append(optimization_channel_arg)
+    optimization_channel_arg = {
+        'name': 'grpc.optimization_target',
+        'str_value': optimization_target
+    }
+    scenario['client_config']['channel_args'].append(optimization_channel_arg)
+    scenario['server_config']['channel_args'].append(optimization_channel_arg)
 
-  if minimal_stack:
-    _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
-    _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
+    if minimal_stack:
+        _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
+        _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
 
-  if messages_per_stream:
-    scenario['client_config']['messages_per_stream'] = messages_per_stream
-  if client_language:
-    # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
-    scenario['CLIENT_LANGUAGE'] = client_language
-  if server_language:
-    # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
-    scenario['SERVER_LANGUAGE'] = server_language
-  if categories:
-    scenario['CATEGORIES'] = categories
-  if len(excluded_poll_engines):
-    # The polling engines for which this scenario is excluded
-    scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
-  return scenario
+    if messages_per_stream:
+        scenario['client_config']['messages_per_stream'] = messages_per_stream
+    if client_language:
+        # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
+        scenario['CLIENT_LANGUAGE'] = client_language
+    if server_language:
+        # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
+        scenario['SERVER_LANGUAGE'] = server_language
+    if categories:
+        scenario['CATEGORIES'] = categories
+    if len(excluded_poll_engines):
+        # The polling engines for which this scenario is excluded
+        scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
+    return scenario
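
Editor's note: to make the knobs above concrete, here is an illustrative call to _ping_pong_scenario (the scenario name is made up); the assertions follow from the defaults shown in this diff.

    s = _ping_pong_scenario(
        'cpp_protobuf_async_unary_ping_pong_example',  # hypothetical name
        rpc_type='UNARY',
        client_type='ASYNC_CLIENT',
        server_type='ASYNC_SERVER')
    assert s['benchmark_seconds'] == BENCHMARK_SECONDS
    assert s['client_config']['security_params'] == SECURE_SECARGS  # secure=True by default
    assert s['client_config']['outstanding_rpcs_per_channel'] == 1  # ping-pong, not unconstrained
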
 
 
 class CXXLanguage:
 
-  def __init__(self):
-    self.safename = 'cxx'
+    def __init__(self):
+        self.safename = 'cxx'
 
-  def worker_cmdline(self):
-    return ['bins/opt/qps_worker']
+    def worker_cmdline(self):
+        return ['bins/opt/qps_worker']
 
-  def worker_port_offset(self):
-    return 0
+    def worker_port_offset(self):
+        return 0
 
-  def scenarios(self):
-    # TODO(ctiller): add 70% load latency test
-    yield _ping_pong_scenario(
-      'cpp_protobuf_async_unary_1channel_100rpcs_1MB', rpc_type='UNARY',
-      client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-      req_size=1024*1024, resp_size=1024*1024,
-      unconstrained_client='async', outstanding=100, channels=1,
-      num_clients=1,
-      secure=False,
-      categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
-    yield _ping_pong_scenario(
-      'cpp_protobuf_async_streaming_from_client_1channel_1MB', rpc_type='STREAMING_FROM_CLIENT',
-      client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-      req_size=1024*1024, resp_size=1024*1024,
-      unconstrained_client='async', outstanding=1, channels=1,
-      num_clients=1,
-      secure=False,
-      categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
-    yield _ping_pong_scenario(
-       'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
-       rpc_type='UNARY', client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-       req_size=300, resp_size=50,
-       unconstrained_client='async', outstanding=30000, channels=300,
-       offered_load=37500, secure=False,
-       async_server_threads=16, server_threads_per_cq=1,
-       categories=[SMOKETEST] + [SCALABLE])
-
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else [INPROC]) + [SCALABLE]
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_ping_pong_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE])
-
-      for mps in geometric_progression(1, 20, 10):
+    def scenarios(self):
+        # TODO(ctiller): add 70% load latency test
         yield _ping_pong_scenario(
-            'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
-            rpc_type='STREAMING',
+            'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
+            rpc_type='UNARY',
             client_type='ASYNC_CLIENT',
-            server_type='ASYNC_GENERIC_SERVER',
-            unconstrained_client='async', use_generic_payload=True,
-            secure=secure, messages_per_stream=mps,
-            minimal_stack=not secure,
-            categories=smoketest_categories+[SCALABLE])
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            unconstrained_client='async',
+            outstanding=100,
+            channels=1,
+            num_clients=1,
+            secure=False,
+            categories=[SMOKETEST] + [INPROC] + [SCALABLE])
 
-      for mps in geometric_progression(1, 200, math.sqrt(10)):
         yield _ping_pong_scenario(
-            'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
-            rpc_type='STREAMING',
+            'cpp_protobuf_async_streaming_from_client_1channel_1MB',
+            rpc_type='STREAMING_FROM_CLIENT',
             client_type='ASYNC_CLIENT',
-            server_type='ASYNC_GENERIC_SERVER',
-            unconstrained_client='async', use_generic_payload=True,
-            secure=secure, messages_per_stream=mps,
-            minimal_stack=not secure,
-            categories=[SWEEP])
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            unconstrained_client='async',
+            outstanding=1,
+            channels=1,
+            num_clients=1,
+            secure=False,
+            categories=[SMOKETEST] + [INPROC] + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
-          rpc_type='STREAMING',
-          req_size=1024*1024,
-          resp_size=1024*1024,
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE],
-          channels=1, outstanding=100)
+        yield _ping_pong_scenario(
+            'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=300,
+            resp_size=50,
+            unconstrained_client='async',
+            outstanding=30000,
+            channels=300,
+            offered_load=37500,
+            secure=False,
+            async_server_threads=16,
+            server_threads_per_cq=1,
+            categories=[SMOKETEST] + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' % secstr,
-          rpc_type='STREAMING',
-          req_size=64*1024,
-          resp_size=64*1024,
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE])
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
+            smoketest_categories = ([SMOKETEST]
+                                    if secure else [INPROC]) + [SCALABLE]
 
-      # TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
-      #yield _ping_pong_scenario(
-      #    'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='STREAMING',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_GENERIC_SERVER',
-      #    unconstrained_client='async-limited', use_generic_payload=True,
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
 
-      #yield _ping_pong_scenario(
-      #    'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='STREAMING',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_SERVER',
-      #    unconstrained_client='async-limited',
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
-
-      #yield _ping_pong_scenario(
-      #    'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='UNARY',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_SERVER',
-      #    unconstrained_client='async-limited',
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async-limited', use_generic_payload=True,
-          async_server_threads=1,
-          minimal_stack=not secure,
-          secure=secure)
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' %
-          (secstr),
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories + [SCALABLE],
-          excluded_poll_engines = ['poll-cv'])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s' %
-          (secstr),
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          channels=1,
-          outstanding=64,
-          req_size=128,
-          resp_size=8*1024*1024,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories + [SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE],
-          excluded_poll_engines = ['poll-cv'])
-
-      yield _ping_pong_scenario(
-        'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr, rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        secure=secure,
-        minimal_stack=not secure,
-        categories=smoketest_categories + [SCALABLE])
-
-      for rpc_type in ['unary', 'streaming', 'streaming_from_client', 'streaming_from_server']:
-        for synchronicity in ['sync', 'async']:
-          yield _ping_pong_scenario(
-              'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr),
-              rpc_type=rpc_type.upper(),
-              client_type='%s_CLIENT' % synchronicity.upper(),
-              server_type='%s_SERVER' % synchronicity.upper(),
-              async_server_threads=1,
-              minimal_stack=not secure,
-              secure=secure)
-
-          for size in geometric_progression(1, 1024*1024*1024+1, 8):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' % (synchronicity, rpc_type, secstr, size),
-                  rpc_type=rpc_type.upper(),
-                  req_size=size,
-                  resp_size=size,
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure,
-                  minimal_stack=not secure,
-                  categories=[SWEEP])
-
-          yield _ping_pong_scenario(
-              'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr),
-              rpc_type=rpc_type.upper(),
-              client_type='%s_CLIENT' % synchronicity.upper(),
-              server_type='%s_SERVER' % synchronicity.upper(),
-              unconstrained_client=synchronicity,
-              secure=secure,
-              minimal_stack=not secure,
-              server_threads_per_cq=3,
-              client_threads_per_cq=3,
-              categories=smoketest_categories+[SCALABLE])
-
-          # TODO(vjpai): Re-enable this test. It has a lot of timeouts
-          # and hasn't yet been conclusively identified as a test failure
-          # or race in the library
-          # yield _ping_pong_scenario(
-          #     'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
-          #     rpc_type=rpc_type.upper(),
-          #     client_type='%s_CLIENT' % synchronicity.upper(),
-          #     server_type='%s_SERVER' % synchronicity.upper(),
-          #     unconstrained_client=synchronicity,
-          #     secure=secure,
-          #     categories=smoketest_categories+[SCALABLE],
-          #     resource_quota_size=500*1024)
-
-          if rpc_type == 'streaming':
             for mps in geometric_progression(1, 20, 10):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
-                  rpc_type=rpc_type.upper(),
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure, messages_per_stream=mps,
-                  minimal_stack=not secure,
-                  categories=smoketest_categories+[SCALABLE])
+                yield _ping_pong_scenario(
+                    'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+                    (mps, secstr),
+                    rpc_type='STREAMING',
+                    client_type='ASYNC_CLIENT',
+                    server_type='ASYNC_GENERIC_SERVER',
+                    unconstrained_client='async',
+                    use_generic_payload=True,
+                    secure=secure,
+                    messages_per_stream=mps,
+                    minimal_stack=not secure,
+                    categories=smoketest_categories + [SCALABLE])
 
             for mps in geometric_progression(1, 200, math.sqrt(10)):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
-                  rpc_type=rpc_type.upper(),
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure, messages_per_stream=mps,
-                  minimal_stack=not secure,
-                  categories=[SWEEP])
-
-          for channels in geometric_progression(1, 20000, math.sqrt(10)):
-            for outstanding in geometric_progression(1, 200000, math.sqrt(10)):
-                if synchronicity == 'sync' and outstanding > 1200: continue
-                if outstanding < channels: continue
                 yield _ping_pong_scenario(
-                    'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding),
-                    rpc_type=rpc_type.upper(),
-                    client_type='%s_CLIENT' % synchronicity.upper(),
-                    server_type='%s_SERVER' % synchronicity.upper(),
-                    unconstrained_client=synchronicity, secure=secure,
+                    'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+                    (mps, secstr),
+                    rpc_type='STREAMING',
+                    client_type='ASYNC_CLIENT',
+                    server_type='ASYNC_GENERIC_SERVER',
+                    unconstrained_client='async',
+                    use_generic_payload=True,
+                    secure=secure,
+                    messages_per_stream=mps,
                     minimal_stack=not secure,
-                    categories=[SWEEP], channels=channels, outstanding=outstanding)
+                    categories=[SWEEP])
 
-  def __str__(self):
-    return 'c++'
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+                rpc_type='STREAMING',
+                req_size=1024 * 1024,
+                resp_size=1024 * 1024,
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                channels=1,
+                outstanding=100)
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
+                secstr,
+                rpc_type='STREAMING',
+                req_size=64 * 1024,
+                resp_size=64 * 1024,
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' %
+                secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async-limited',
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async-limited',
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
+                secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                async_server_threads=1,
+                minimal_stack=not secure,
+                secure=secure)
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
+                % (secstr),
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                excluded_poll_engines=['poll-cv'])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
+                % (secstr),
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                channels=1,
+                outstanding=64,
+                req_size=128,
+                resp_size=8 * 1024 * 1024,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                excluded_poll_engines=['poll-cv'])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                req_size=1024 * 1024,
+                resp_size=1024 * 1024,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            for rpc_type in [
+                    'unary', 'streaming', 'streaming_from_client',
+                    'streaming_from_server'
+            ]:
+                for synchronicity in ['sync', 'async']:
+                    yield _ping_pong_scenario(
+                        'cpp_protobuf_%s_%s_ping_pong_%s' %
+                        (synchronicity, rpc_type, secstr),
+                        rpc_type=rpc_type.upper(),
+                        client_type='%s_CLIENT' % synchronicity.upper(),
+                        server_type='%s_SERVER' % synchronicity.upper(),
+                        async_server_threads=1,
+                        minimal_stack=not secure,
+                        secure=secure)
+
+                    for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
+                                                      8):
+                        yield _ping_pong_scenario(
+                            'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
+                            (synchronicity, rpc_type, secstr, size),
+                            rpc_type=rpc_type.upper(),
+                            req_size=size,
+                            resp_size=size,
+                            client_type='%s_CLIENT' % synchronicity.upper(),
+                            server_type='%s_SERVER' % synchronicity.upper(),
+                            unconstrained_client=synchronicity,
+                            secure=secure,
+                            minimal_stack=not secure,
+                            categories=[SWEEP])
+
+                    yield _ping_pong_scenario(
+                        'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
+                        (synchronicity, rpc_type, secstr),
+                        rpc_type=rpc_type.upper(),
+                        client_type='%s_CLIENT' % synchronicity.upper(),
+                        server_type='%s_SERVER' % synchronicity.upper(),
+                        unconstrained_client=synchronicity,
+                        secure=secure,
+                        minimal_stack=not secure,
+                        server_threads_per_cq=3,
+                        client_threads_per_cq=3,
+                        categories=smoketest_categories + [SCALABLE])
+
+                    # TODO(vjpai): Re-enable this test. It has a lot of timeouts
+                    # and hasn't yet been conclusively identified as a test failure
+                    # or race in the library
+                    # yield _ping_pong_scenario(
+                    #     'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
+                    #     rpc_type=rpc_type.upper(),
+                    #     client_type='%s_CLIENT' % synchronicity.upper(),
+                    #     server_type='%s_SERVER' % synchronicity.upper(),
+                    #     unconstrained_client=synchronicity,
+                    #     secure=secure,
+                    #     categories=smoketest_categories+[SCALABLE],
+                    #     resource_quota_size=500*1024)
+
+                    if rpc_type == 'streaming':
+                        for mps in geometric_progression(1, 20, 10):
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+                                % (synchronicity, rpc_type, mps, secstr),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                messages_per_stream=mps,
+                                minimal_stack=not secure,
+                                categories=smoketest_categories + [SCALABLE])
+
+                        for mps in geometric_progression(1, 200, math.sqrt(10)):
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+                                % (synchronicity, rpc_type, mps, secstr),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                messages_per_stream=mps,
+                                minimal_stack=not secure,
+                                categories=[SWEEP])
+
+                    for channels in geometric_progression(1, 20000,
+                                                          math.sqrt(10)):
+                        for outstanding in geometric_progression(1, 200000,
+                                                                 math.sqrt(10)):
+                            if synchronicity == 'sync' and outstanding > 1200:
+                                continue
+                            if outstanding < channels: continue
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
+                                % (synchronicity, rpc_type, secstr, channels,
+                                   outstanding),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                minimal_stack=not secure,
+                                categories=[SWEEP],
+                                channels=channels,
+                                outstanding=outstanding)
+
+    def __str__(self):
+        return 'c++'
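
Editor's note: each language class in this file exposes the same small interface, presumably consumed by run_performance_tests.py: a worker command line, a worker port offset, a scenario generator, and a printable name. A hedged skeleton of that shape, with every value hypothetical:

    class ExampleLanguage:  # hypothetical; mirrors CXXLanguage above and CSharpLanguage below

        def __init__(self):
            self.safename = 'example'

        def worker_cmdline(self):
            return ['tools/run_tests/performance/run_worker_example.sh']

        def worker_port_offset(self):
            return 900

        def scenarios(self):
            yield _ping_pong_scenario(
                'example_protobuf_async_unary_ping_pong',
                rpc_type='UNARY',
                client_type='ASYNC_CLIENT',
                server_type='ASYNC_SERVER')

        def __str__(self):
            return 'example'
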
 
 
 class CSharpLanguage:
 
-  def __init__(self):
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_csharp.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_csharp.sh']
 
-  def worker_port_offset(self):
-    return 100
+    def worker_port_offset(self):
+        return 100
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        use_generic_payload=True,
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            use_generic_payload=True,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_ping_pong_insecure_1MB', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        use_generic_payload=True,
-        secure=False,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_ping_pong_insecure_1MB',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            use_generic_payload=True,
+            secure=False,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_qps_unconstrained_insecure', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        unconstrained_client='async', use_generic_payload=True,
-        secure=False,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_qps_unconstrained_insecure',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            unconstrained_client='async',
+            use_generic_payload=True,
+            secure=False,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'csharp_protobuf_sync_to_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async',
-        categories=[SMOKETEST,SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async', server_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            server_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync', server_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync',
+            server_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async', client_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            client_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
+
 
 class PythonLanguage:
 
-  def __init__(self):
-    self.safename = 'python'
+    def __init__(self):
+        self.safename = 'python'
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_python.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_python.sh']
 
-  def worker_port_offset(self):
-    return 500
+    def worker_port_offset(self):
+        return 500
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        use_generic_payload=True,
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'python_generic_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            use_generic_payload=True,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'python_protobuf_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'python_to_cpp_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
+
 
 class RubyLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_ruby.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_ruby.sh']
 
-  def worker_port_offset(self):
-    return 300
+    def worker_port_offset(self):
+        return 300
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'ruby_protobuf_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'ruby_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'ruby_protobuf_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
 
 
 class Php7Language:
 
-  def __init__(self, php7_protobuf_c=False):
-    pass
-    self.php7_protobuf_c=php7_protobuf_c
-    self.safename = str(self)
+    def __init__(self, php7_protobuf_c=False):
+        self.php7_protobuf_c = php7_protobuf_c
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    if self.php7_protobuf_c:
-        return ['tools/run_tests/performance/run_worker_php.sh', '--use_protobuf_c_extension']
-    return ['tools/run_tests/performance/run_worker_php.sh']
+    def worker_cmdline(self):
+        if self.php7_protobuf_c:
+            return [
+                'tools/run_tests/performance/run_worker_php.sh',
+                '--use_protobuf_c_extension'
+            ]
+        return ['tools/run_tests/performance/run_worker_php.sh']
 
-  def worker_port_offset(self):
-    if self.php7_protobuf_c:
-        return 900
-    return 800
+    def worker_port_offset(self):
+        if self.php7_protobuf_c:
+            return 900
+        return 800
 
-  def scenarios(self):
-    php7_extension_mode='php7_protobuf_php_extension'
-    if self.php7_protobuf_c:
-        php7_extension_mode='php7_protobuf_c_extension'
+    def scenarios(self):
+        php7_extension_mode = 'php7_protobuf_php_extension'
+        if self.php7_protobuf_c:
+            php7_extension_mode = 'php7_protobuf_c_extension'
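+        # The selected extension mode becomes part of every scenario name below,
+        # keeping protobuf C-extension results separate from pure-PHP results.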
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
-        rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
-        rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    # TODO(ddyihai): Investigate why when async_server_threads=1/CPU usage 340%, the QPS performs
-    # better than async_server_threads=0/CPU usage 490%.
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' % php7_extension_mode,
-        rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
+        # TODO(ddyihai): Investigate why QPS is better with async_server_threads=1
+        # (CPU usage 340%) than with async_server_threads=0 (CPU usage 490%).
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
+            php7_extension_mode,
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            outstanding=1,
+            async_server_threads=1,
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' % php7_extension_mode,
-        rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
+            php7_extension_mode,
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            outstanding=1,
+            async_server_threads=1,
+            unconstrained_client='sync')
 
-  def __str__(self):
-    if self.php7_protobuf_c:
-        return 'php7_protobuf_c'
-    return 'php7'
+    def __str__(self):
+        if self.php7_protobuf_c:
+            return 'php7_protobuf_c'
+        return 'php7'
+
 
 class JavaLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_java.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_java.sh']
 
-  def worker_port_offset(self):
-    return 400
+    def worker_port_offset(self):
+        return 400
 
-  def scenarios(self):
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+    def scenarios(self):
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
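+            # Secure variants double as smoke tests; every variant stays in SCALABLE.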
+            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_protobuf_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'java_protobuf_async_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_protobuf_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=[SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=[SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async-limited', use_generic_payload=True,
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      # TODO(jtattermusch): add scenarios java vs C++
+            # TODO(jtattermusch): add scenarios java vs C++
 
-  def __str__(self):
-    return 'java'
+    def __str__(self):
+        return 'java'
 
 
 class GoLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_go.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_go.sh']
 
-  def worker_port_offset(self):
-    return 600
+    def worker_port_offset(self):
+        return 600
 
-  def scenarios(self):
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+    def scenarios(self):
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
+            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
 
-      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
-      # but that's mostly because of lack of better name of the enum value.
-      yield _ping_pong_scenario(
-          'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
+            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+            # but that's mostly because of the lack of a better name for the enum value.
+            yield _ping_pong_scenario(
+                'go_generic_sync_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure)
 
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          categories=smoketest_categories+[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                categories=smoketest_categories + [SCALABLE])
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          categories=[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                categories=[SCALABLE])
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
-      # but that's mostly because of lack of better name of the enum value.
-      yield _ping_pong_scenario(
-          'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          categories=[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+            # but that's mostly because of the lack of a better name for the enum value.
+            yield _ping_pong_scenario(
+                'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                categories=[SCALABLE])
 
-      # TODO(jtattermusch): add scenarios go vs C++
+            # TODO(jtattermusch): add scenarios go vs C++
 
-  def __str__(self):
-    return 'go'
+    def __str__(self):
+        return 'go'
 
 
 LANGUAGES = {
-    'c++' : CXXLanguage(),
-    'csharp' : CSharpLanguage(),
-    'ruby' : RubyLanguage(),
-    'php7' : Php7Language(),
-    'php7_protobuf_c' : Php7Language(php7_protobuf_c=True),
-    'java' : JavaLanguage(),
-    'python' : PythonLanguage(),
-    'go' : GoLanguage(),
+    'c++': CXXLanguage(),
+    'csharp': CSharpLanguage(),
+    'ruby': RubyLanguage(),
+    'php7': Php7Language(),
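+    # Same PHP7 scenarios, but run with the protobuf C extension enabled.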
+    'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
+    'java': JavaLanguage(),
+    'python': PythonLanguage(),
+    'go': GoLanguage(),
 }
diff --git a/tools/run_tests/python_utils/antagonist.py b/tools/run_tests/python_utils/antagonist.py
index 0d79ce0..a928a4c 100755
--- a/tools/run_tests/python_utils/antagonist.py
+++ b/tools/run_tests/python_utils/antagonist.py
@@ -12,8 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """This is used by run_tests.py to create cpu load on a machine"""
 
 while True:
-	pass
+    pass
diff --git a/tools/run_tests/python_utils/comment_on_pr.py b/tools/run_tests/python_utils/comment_on_pr.py
index 21b9bb7..399c996 100644
--- a/tools/run_tests/python_utils/comment_on_pr.py
+++ b/tools/run_tests/python_utils/comment_on_pr.py
@@ -16,19 +16,22 @@
 import json
 import urllib2
 
+
 def comment_on_pr(text):
-  if 'JENKINS_OAUTH_TOKEN' not in os.environ:
-    print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
-    return
-  if 'ghprbPullId' not in os.environ:
-    print 'Missing ghprbPullId env var: not commenting'
-    return
-  req = urllib2.Request(
-      url = 'https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
-          os.environ['ghprbPullId'],
-      data = json.dumps({'body': text}),
-      headers = {
-        'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
-        'Content-Type': 'application/json',
-      })
-  print urllib2.urlopen(req).read()
+    if 'JENKINS_OAUTH_TOKEN' not in os.environ:
+        print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
+        return
+    if 'ghprbPullId' not in os.environ:
+        print 'Missing ghprbPullId env var: not commenting'
+        return
+    req = urllib2.Request(
+        url='https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
+        os.environ['ghprbPullId'],
+        data=json.dumps({
+            'body': text
+        }),
+        headers={
+            'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
+            'Content-Type': 'application/json',
+        })
+    print urllib2.urlopen(req).read()
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index 2f5285b..d2941c0 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helpers to run docker instances as jobs."""
 
 from __future__ import print_function
@@ -28,102 +27,109 @@
 
 
 def random_name(base_name):
-  """Randomizes given base name."""
-  return '%s_%s' % (base_name, uuid.uuid4())
+    """Randomizes given base name."""
+    return '%s_%s' % (base_name, uuid.uuid4())
 
 
 def docker_kill(cid):
-  """Kills a docker container. Returns True if successful."""
-  return subprocess.call(['docker','kill', str(cid)],
-                         stdin=subprocess.PIPE,
-                         stdout=_DEVNULL,
-                         stderr=subprocess.STDOUT) == 0
+    """Kills a docker container. Returns True if successful."""
+    return subprocess.call(
+        ['docker', 'kill', str(cid)],
+        stdin=subprocess.PIPE,
+        stdout=_DEVNULL,
+        stderr=subprocess.STDOUT) == 0
 
 
 def docker_mapped_port(cid, port, timeout_seconds=15):
-  """Get port mapped to internal given internal port for given container."""
-  started = time.time()
-  while time.time() - started < timeout_seconds:
-    try:
-      output = subprocess.check_output('docker port %s %s' % (cid, port),
-                                       stderr=_DEVNULL,
-                                       shell=True)
-      return int(output.split(':', 2)[1])
-    except subprocess.CalledProcessError as e:
-      pass
-  raise Exception('Failed to get exposed port %s for container %s.' %
-                  (port, cid))
+    """Get port mapped to internal given internal port for given container."""
+    started = time.time()
+    while time.time() - started < timeout_seconds:
+        try:
+            output = subprocess.check_output(
+                'docker port %s %s' % (cid, port), stderr=_DEVNULL, shell=True)
+            return int(output.split(':', 2)[1])
+        except subprocess.CalledProcessError as e:
+            pass
+    raise Exception('Failed to get exposed port %s for container %s.' %
+                    (port, cid))
 
 
 def wait_for_healthy(cid, shortname, timeout_seconds):
-  """Wait timeout_seconds for the container to become healthy"""
-  started = time.time()
-  while time.time() - started < timeout_seconds:
-    try:
-      output = subprocess.check_output(
-          ['docker', 'inspect', '--format="{{.State.Health.Status}}"', cid],
-          stderr=_DEVNULL)
-      if output.strip('\n') == 'healthy':
-        return
-    except subprocess.CalledProcessError as e:
-      pass
-    time.sleep(1)
-  raise Exception('Timed out waiting for %s (%s) to pass health check' %
-                  (shortname, cid))
+    """Wait timeout_seconds for the container to become healthy"""
+    started = time.time()
+    while time.time() - started < timeout_seconds:
+        try:
+            output = subprocess.check_output(
+                [
+                    'docker', 'inspect', '--format="{{.State.Health.Status}}"',
+                    cid
+                ],
+                stderr=_DEVNULL)
+            if output.strip('\n') == 'healthy':
+                return
+        except subprocess.CalledProcessError as e:
+            pass
+        time.sleep(1)
+    raise Exception('Timed out waiting for %s (%s) to pass health check' %
+                    (shortname, cid))
 
 
 def finish_jobs(jobs):
-  """Kills given docker containers and waits for corresponding jobs to finish"""
-  for job in jobs:
-    job.kill(suppress_failure=True)
+    """Kills given docker containers and waits for corresponding jobs to finish"""
+    for job in jobs:
+        job.kill(suppress_failure=True)
 
-  while any(job.is_running() for job in jobs):
-    time.sleep(1)
+    while any(job.is_running() for job in jobs):
+        time.sleep(1)
 
 
 def image_exists(image):
-  """Returns True if given docker image exists."""
-  return subprocess.call(['docker','inspect', image],
-                         stdin=subprocess.PIPE,
-                         stdout=_DEVNULL,
-                         stderr=subprocess.STDOUT) == 0
+    """Returns True if given docker image exists."""
+    return subprocess.call(
+        ['docker', 'inspect', image],
+        stdin=subprocess.PIPE,
+        stdout=_DEVNULL,
+        stderr=subprocess.STDOUT) == 0
 
 
 def remove_image(image, skip_nonexistent=False, max_retries=10):
-  """Attempts to remove docker image with retries."""
-  if skip_nonexistent and not image_exists(image):
-    return True
-  for attempt in range(0, max_retries):
-    if subprocess.call(['docker','rmi', '-f', image],
-                       stdin=subprocess.PIPE,
-                       stdout=_DEVNULL,
-                       stderr=subprocess.STDOUT) == 0:
-      return True
-    time.sleep(2)
-  print('Failed to remove docker image %s' % image)
-  return False
+    """Attempts to remove docker image with retries."""
+    if skip_nonexistent and not image_exists(image):
+        return True
+    for attempt in range(0, max_retries):
+        if subprocess.call(
+            ['docker', 'rmi', '-f', image],
+                stdin=subprocess.PIPE,
+                stdout=_DEVNULL,
+                stderr=subprocess.STDOUT) == 0:
+            return True
+        time.sleep(2)
+    print('Failed to remove docker image %s' % image)
+    return False
 
 
 class DockerJob:
-  """Encapsulates a job"""
+    """Encapsulates a job"""
 
-  def __init__(self, spec):
-    self._spec = spec
-    self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
-    self._container_name = spec.container_name
+    def __init__(self, spec):
+        self._spec = spec
+        self._job = jobset.Job(
+            spec, newline_on_success=True, travis=True, add_env={})
+        self._container_name = spec.container_name
 
-  def mapped_port(self, port):
-    return docker_mapped_port(self._container_name, port)
+    def mapped_port(self, port):
+        return docker_mapped_port(self._container_name, port)
 
-  def wait_for_healthy(self, timeout_seconds):
-    wait_for_healthy(self._container_name, self._spec.shortname, timeout_seconds)
+    def wait_for_healthy(self, timeout_seconds):
+        wait_for_healthy(self._container_name, self._spec.shortname,
+                         timeout_seconds)
 
-  def kill(self, suppress_failure=False):
-    """Sends kill signal to the container."""
-    if suppress_failure:
-      self._job.suppress_failure_message()
-    return docker_kill(self._container_name)
+    def kill(self, suppress_failure=False):
+        """Sends kill signal to the container."""
+        if suppress_failure:
+            self._job.suppress_failure_message()
+        return docker_kill(self._container_name)
 
-  def is_running(self):
-    """Polls a job and returns True if given job is still running."""
-    return self._job.state() == jobset._RUNNING
+    def is_running(self):
+        """Polls a job and returns True if given job is still running."""
+        return self._job.state() == jobset._RUNNING
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index e880734..8e0dc70 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Filter out tests based on file differences compared to merge target branch"""
 
 from __future__ import print_function
@@ -23,24 +22,25 @@
 
 
 class TestSuite:
-  """
+    """
   Contains label to identify job as belonging to this test suite and
   triggers to identify if changed files are relevant
   """
-  def __init__(self, labels):
-    """
+
+    def __init__(self, labels):
+        """
     Build TestSuite to group tests based on labeling
     :param label: strings that should match a jobs's platform, config, language, or test group
     """
-    self.triggers = []
-    self.labels = labels
+        self.triggers = []
+        self.labels = labels
 
-  def add_trigger(self, trigger):
-    """
+    def add_trigger(self, trigger):
+        """
     Add a regex to list of triggers that determine if a changed file should run tests
     :param trigger: regex matching file relevant to tests
     """
-    self.triggers.append(trigger)
+        self.triggers.append(trigger)
 
 
 # Create test suites
@@ -55,10 +55,11 @@
 _LINUX_TEST_SUITE = TestSuite(['linux'])
 _WINDOWS_TEST_SUITE = TestSuite(['windows'])
 _MACOS_TEST_SUITE = TestSuite(['macos'])
-_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
-                    _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
-                    _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
-                    _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
+_ALL_TEST_SUITES = [
+    _CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
+    _OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
+    _LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
+]
 
 # Dictionary of whitelistable files where the key is a regex matching changed files
 # and the value is a list of tests that should be run. An empty list means that
@@ -66,46 +67,46 @@
 # match any of these regexes will trigger all tests
 # DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
 _WHITELIST_DICT = {
-  '^doc/': [],
-  '^examples/': [],
-  '^include/grpc\+\+/': [_CPP_TEST_SUITE],
-  '^summerofcode/': [],
-  '^src/cpp/': [_CPP_TEST_SUITE],
-  '^src/csharp/': [_CSHARP_TEST_SUITE],
-  '^src/objective\-c/': [_OBJC_TEST_SUITE],
-  '^src/php/': [_PHP_TEST_SUITE],
-  '^src/python/': [_PYTHON_TEST_SUITE],
-  '^src/ruby/': [_RUBY_TEST_SUITE],
-  '^templates/': [],
-  '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
-  '^test/cpp/': [_CPP_TEST_SUITE],
-  '^test/distrib/cpp/': [_CPP_TEST_SUITE],
-  '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
-  '^test/distrib/php/': [_PHP_TEST_SUITE],
-  '^test/distrib/python/': [_PYTHON_TEST_SUITE],
-  '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
-  '^vsprojects/': [_WINDOWS_TEST_SUITE],
-  'composer\.json$': [_PHP_TEST_SUITE],
-  'config\.m4$': [_PHP_TEST_SUITE],
-  'CONTRIBUTING\.md$': [],
-  'Gemfile$': [_RUBY_TEST_SUITE],
-  'grpc\.def$': [_WINDOWS_TEST_SUITE],
-  'grpc\.gemspec$': [_RUBY_TEST_SUITE],
-  'gRPC\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
-  'INSTALL\.md$': [],
-  'LICENSE$': [],
-  'MANIFEST\.md$': [],
-  'package\.json$': [_PHP_TEST_SUITE],
-  'package\.xml$': [_PHP_TEST_SUITE],
-  'PATENTS$': [],
-  'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
-  'README\.md$': [],
-  'requirements\.txt$': [_PYTHON_TEST_SUITE],
-  'setup\.cfg$': [_PYTHON_TEST_SUITE],
-  'setup\.py$': [_PYTHON_TEST_SUITE]
+    '^doc/': [],
+    '^examples/': [],
+    '^include/grpc\+\+/': [_CPP_TEST_SUITE],
+    '^summerofcode/': [],
+    '^src/cpp/': [_CPP_TEST_SUITE],
+    '^src/csharp/': [_CSHARP_TEST_SUITE],
+    '^src/objective\-c/': [_OBJC_TEST_SUITE],
+    '^src/php/': [_PHP_TEST_SUITE],
+    '^src/python/': [_PYTHON_TEST_SUITE],
+    '^src/ruby/': [_RUBY_TEST_SUITE],
+    '^templates/': [],
+    '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
+    '^test/cpp/': [_CPP_TEST_SUITE],
+    '^test/distrib/cpp/': [_CPP_TEST_SUITE],
+    '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
+    '^test/distrib/php/': [_PHP_TEST_SUITE],
+    '^test/distrib/python/': [_PYTHON_TEST_SUITE],
+    '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
+    '^vsprojects/': [_WINDOWS_TEST_SUITE],
+    'composer\.json$': [_PHP_TEST_SUITE],
+    'config\.m4$': [_PHP_TEST_SUITE],
+    'CONTRIBUTING\.md$': [],
+    'Gemfile$': [_RUBY_TEST_SUITE],
+    'grpc\.def$': [_WINDOWS_TEST_SUITE],
+    'grpc\.gemspec$': [_RUBY_TEST_SUITE],
+    'gRPC\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
+    'INSTALL\.md$': [],
+    'LICENSE$': [],
+    'MANIFEST\.md$': [],
+    'package\.json$': [_PHP_TEST_SUITE],
+    'package\.xml$': [_PHP_TEST_SUITE],
+    'PATENTS$': [],
+    'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
+    'README\.md$': [],
+    'requirements\.txt$': [_PYTHON_TEST_SUITE],
+    'setup\.cfg$': [_PYTHON_TEST_SUITE],
+    'setup\.py$': [_PYTHON_TEST_SUITE]
 }
 
 # Regex that combines all keys in _WHITELIST_DICT
@@ -113,83 +114,88 @@
 
 # Add all triggers to their respective test suites
 for trigger, test_suites in six.iteritems(_WHITELIST_DICT):
-  for test_suite in test_suites:
-    test_suite.add_trigger(trigger)
+    for test_suite in test_suites:
+        test_suite.add_trigger(trigger)
 
 
 def _get_changed_files(base_branch):
-  """
+    """
   Get list of changed files between current branch and base of target merge branch
   """
-  # Get file changes between branch and merge-base of specified branch
-  # Not combined to be Windows friendly
-  base_commit = check_output(["git", "merge-base", base_branch, "HEAD"]).rstrip()
-  return check_output(["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
+    # Get file changes between branch and merge-base of specified branch
+    # Not combined to be Windows friendly
+    base_commit = check_output(
+        ["git", "merge-base", base_branch, "HEAD"]).rstrip()
+    return check_output(
+        ["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
 
 
 def _can_skip_tests(file_names, triggers):
-  """
+    """
   Determines if tests are skippable based on if all files do not match list of regexes
   :param file_names: list of changed files generated by _get_changed_files()
   :param triggers: list of regexes matching file name that indicates tests should be run
   :return: safe to skip tests
   """
-  for file_name in file_names:
-    if any(re.match(trigger, file_name) for trigger in triggers):
-      return False
-  return True
+    for file_name in file_names:
+        if any(re.match(trigger, file_name) for trigger in triggers):
+            return False
+    return True
 
 
 def _remove_irrelevant_tests(tests, skippable_labels):
-  """
+    """
   Filters out tests by config or language - will not remove sanitizer tests
   :param tests: list of all tests generated by run_tests_matrix.py
   :param skippable_labels: list of languages and platforms with skippable tests
   :return: list of relevant tests
   """
-  # test.labels[0] is platform and test.labels[2] is language
-  # We skip a test if both are considered safe to skip
-  return [test for test in tests if test.labels[0] not in skippable_labels or \
-          test.labels[2] not in skippable_labels]
+    # test.labels[0] is platform and test.labels[2] is language
+    # We skip a test if both are considered safe to skip
+    return [test for test in tests if test.labels[0] not in skippable_labels or \
+            test.labels[2] not in skippable_labels]
 
 
 def affects_c_cpp(base_branch):
-  """
+    """
   Determines if a pull request's changes affect C/C++. This function exists because
   there are pull request tests that only test C/C++ code
   :param base_branch: branch that a pull request is requesting to merge into
   :return: boolean indicating whether C/C++ changes are made in pull request
   """
-  changed_files = _get_changed_files(base_branch)
-  # Run all tests if any changed file is not in the whitelist dictionary
-  for changed_file in changed_files:
-    if not re.match(_ALL_TRIGGERS, changed_file):
-      return True
-  return not _can_skip_tests(changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
+    changed_files = _get_changed_files(base_branch)
+    # Run all tests if any changed file is not in the whitelist dictionary
+    for changed_file in changed_files:
+        if not re.match(_ALL_TRIGGERS, changed_file):
+            return True
+    return not _can_skip_tests(
+        changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
 
 
 def filter_tests(tests, base_branch):
-  """
+    """
   Filters out tests that are safe to ignore
   :param tests: list of all tests generated by run_tests_matrix.py
   :return: list of relevant tests
   """
-  print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
-  changed_files = _get_changed_files(base_branch)
-  for changed_file in changed_files:
-    print('  %s' % changed_file)
-  print('')
+    print(
+        'Finding file differences between gRPC %s branch and pull request...\n'
+        % base_branch)
+    changed_files = _get_changed_files(base_branch)
+    for changed_file in changed_files:
+        print('  %s' % changed_file)
+    print('')
 
-  # Run all tests if any changed file is not in the whitelist dictionary
-  for changed_file in changed_files:
-    if not re.match(_ALL_TRIGGERS, changed_file):
-      return(tests)
-  # Figure out which language and platform tests to run
-  skippable_labels = []
-  for test_suite in _ALL_TEST_SUITES:
-    if _can_skip_tests(changed_files, test_suite.triggers):
-      for label in test_suite.labels:
-        print('  %s tests safe to skip' % label)
-        skippable_labels.append(label)
-  tests = _remove_irrelevant_tests(tests, skippable_labels)
-  return tests
+    # Run all tests if any changed file is not in the whitelist dictionary
+    for changed_file in changed_files:
+        if not re.match(_ALL_TRIGGERS, changed_file):
+            return tests
+    # Figure out which language and platform tests to run
+    skippable_labels = []
+    for test_suite in _ALL_TEST_SUITES:
+        if _can_skip_tests(changed_files, test_suite.triggers):
+            for label in test_suite.labels:
+                print('  %s tests safe to skip' % label)
+                skippable_labels.append(label)
+    tests = _remove_irrelevant_tests(tests, skippable_labels)
+    return tests
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 85eef44..454d09b 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run a group of subprocesses and then finish."""
 
 from __future__ import print_function
@@ -28,11 +27,9 @@
 import time
 import errno
 
-
 # cpu cost measurement
 measure_cpu_costs = False
 
-
 _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
 _MAX_RESULT_SIZE = 8192
 
@@ -42,63 +39,60 @@
 # characters to the PR description, which leak into the environment here
 # and cause failures.
 def strip_non_ascii_chars(s):
-  return ''.join(c for c in s if ord(c) < 128)
+    return ''.join(c for c in s if ord(c) < 128)
 
 
 def sanitized_environment(env):
-  sanitized = {}
-  for key, value in env.items():
-    sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
-  return sanitized
+    sanitized = {}
+    for key, value in env.items():
+        sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
+    return sanitized
 
 
 def platform_string():
-  if platform.system() == 'Windows':
-    return 'windows'
-  elif platform.system()[:7] == 'MSYS_NT':
-    return 'windows'
-  elif platform.system() == 'Darwin':
-    return 'mac'
-  elif platform.system() == 'Linux':
-    return 'linux'
-  else:
-    return 'posix'
+    if platform.system() == 'Windows':
+        return 'windows'
+    elif platform.system()[:7] == 'MSYS_NT':
+        return 'windows'
+    elif platform.system() == 'Darwin':
+        return 'mac'
+    elif platform.system() == 'Linux':
+        return 'linux'
+    else:
+        return 'posix'
 
 
 # setup a signal handler so that signal.pause registers 'something'
 # when a child finishes
 # not using futures and threading to avoid a dependency on subprocess32
 if platform_string() == 'windows':
-  pass
-else:
-  def alarm_handler(unused_signum, unused_frame):
     pass
+else:
 
-  signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
-  signal.signal(signal.SIGALRM, alarm_handler)
+    def alarm_handler(unused_signum, unused_frame):
+        pass
 
+    signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
+    signal.signal(signal.SIGALRM, alarm_handler)
 
 _SUCCESS = object()
 _FAILURE = object()
 _RUNNING = object()
 _KILLED = object()
 
-
 _COLORS = {
-    'red': [ 31, 0 ],
-    'green': [ 32, 0 ],
-    'yellow': [ 33, 0 ],
-    'lightgray': [ 37, 0],
-    'gray': [ 30, 1 ],
-    'purple': [ 35, 0 ],
-    'cyan': [ 36, 0 ]
-    }
-
+    'red': [31, 0],
+    'green': [32, 0],
+    'yellow': [33, 0],
+    'lightgray': [37, 0],
+    'gray': [30, 1],
+    'purple': [35, 0],
+    'cyan': [36, 0]
+}
 
 _BEGINNING_OF_LINE = '\x1b[0G'
 _CLEAR_LINE = '\x1b[2K'
 
-
 _TAG_COLOR = {
     'FAILED': 'red',
     'FLAKE': 'purple',
@@ -111,392 +105,435 @@
     'SUCCESS': 'green',
     'IDLE': 'gray',
     'SKIPPED': 'cyan'
-    }
+}
 
 _FORMAT = '%(asctime)-15s %(message)s'
 logging.basicConfig(level=logging.INFO, format=_FORMAT)
 
 
 def eintr_be_gone(fn):
-  """Run fn until it doesn't stop because of EINTR"""
-  while True:
-    try:
-      return fn()
-    except IOError, e:
-      if e.errno != errno.EINTR:
-        raise
-
+    """Run fn until it doesn't stop because of EINTR"""
+    while True:
+        try:
+            return fn()
+        except IOError, e:
+            if e.errno != errno.EINTR:
+                raise
 
 
 def message(tag, msg, explanatory_text=None, do_newline=False):
-  if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
-    return
-  message.old_tag = tag
-  message.old_msg = msg
-  while True:
-    try:
-      if platform_string() == 'windows' or not sys.stdout.isatty():
-        if explanatory_text:
-          logging.info(explanatory_text)
-        logging.info('%s: %s', tag, msg)
-      else:
-        sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
-            _BEGINNING_OF_LINE,
-            _CLEAR_LINE,
-            '\n%s' % explanatory_text if explanatory_text is not None else '',
-            _COLORS[_TAG_COLOR[tag]][1],
-            _COLORS[_TAG_COLOR[tag]][0],
-            tag,
-            msg,
-            '\n' if do_newline or explanatory_text is not None else ''))
-      sys.stdout.flush()
-      return
-    except IOError, e:
-      if e.errno != errno.EINTR:
-        raise
+    if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
+        return
+    message.old_tag = tag
+    message.old_msg = msg
+    while True:
+        try:
+            if platform_string() == 'windows' or not sys.stdout.isatty():
+                if explanatory_text:
+                    logging.info(explanatory_text)
+                logging.info('%s: %s', tag, msg)
+            else:
+                sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
+                    _BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
+                    if explanatory_text is not None else '',
+                    _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+                    tag, msg, '\n'
+                    if do_newline or explanatory_text is not None else ''))
+            sys.stdout.flush()
+            return
+        except IOError, e:
+            if e.errno != errno.EINTR:
+                raise
+
 
 message.old_tag = ''
 message.old_msg = ''
 
+
 def which(filename):
-  if '/' in filename:
-    return filename
-  for path in os.environ['PATH'].split(os.pathsep):
-    if os.path.exists(os.path.join(path, filename)):
-      return os.path.join(path, filename)
-  raise Exception('%s not found' % filename)
+    if '/' in filename:
+        return filename
+    for path in os.environ['PATH'].split(os.pathsep):
+        if os.path.exists(os.path.join(path, filename)):
+            return os.path.join(path, filename)
+    raise Exception('%s not found' % filename)
 
 
 class JobSpec(object):
-  """Specifies what to run for a job."""
+    """Specifies what to run for a job."""
 
-  def __init__(self, cmdline, shortname=None, environ=None,
-               cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
-               timeout_retries=0, kill_handler=None, cpu_cost=1.0,
-               verbose_success=False):
-    """
+    def __init__(self,
+                 cmdline,
+                 shortname=None,
+                 environ=None,
+                 cwd=None,
+                 shell=False,
+                 timeout_seconds=5 * 60,
+                 flake_retries=0,
+                 timeout_retries=0,
+                 kill_handler=None,
+                 cpu_cost=1.0,
+                 verbose_success=False):
+        """
     Arguments:
       cmdline: a list of arguments to pass as the command line
       environ: a dictionary of environment variables to set in the child process
       kill_handler: a handler that will be called whenever job.kill() is invoked
       cpu_cost: number of cores per second this job needs
     """
-    if environ is None:
-      environ = {}
-    self.cmdline = cmdline
-    self.environ = environ
-    self.shortname = cmdline[0] if shortname is None else shortname
-    self.cwd = cwd
-    self.shell = shell
-    self.timeout_seconds = timeout_seconds
-    self.flake_retries = flake_retries
-    self.timeout_retries = timeout_retries
-    self.kill_handler = kill_handler
-    self.cpu_cost = cpu_cost
-    self.verbose_success = verbose_success
+        if environ is None:
+            environ = {}
+        self.cmdline = cmdline
+        self.environ = environ
+        self.shortname = cmdline[0] if shortname is None else shortname
+        self.cwd = cwd
+        self.shell = shell
+        self.timeout_seconds = timeout_seconds
+        self.flake_retries = flake_retries
+        self.timeout_retries = timeout_retries
+        self.kill_handler = kill_handler
+        self.cpu_cost = cpu_cost
+        self.verbose_success = verbose_success
 
-  def identity(self):
-    return '%r %r' % (self.cmdline, self.environ)
+    def identity(self):
+        return '%r %r' % (self.cmdline, self.environ)
 
-  def __hash__(self):
-    return hash(self.identity())
+    def __hash__(self):
+        return hash(self.identity())
 
-  def __cmp__(self, other):
-    return self.identity() == other.identity()
+    def __cmp__(self, other):
+        return self.identity() == other.identity()
 
-  def __repr__(self):
-    return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
+    def __repr__(self):
+        return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
+                                                      self.cmdline)
 
-  def __str__(self):
-    return '%s: %s %s' % (self.shortname,
-                          ' '.join('%s=%s' % kv for kv in self.environ.items()),
-                          ' '.join(self.cmdline))
+    def __str__(self):
+        return '%s: %s %s' % (self.shortname, ' '.join(
+            '%s=%s' % kv
+            for kv in self.environ.items()), ' '.join(self.cmdline))
 
 
 class JobResult(object):
-  def __init__(self):
-    self.state = 'UNKNOWN'
-    self.returncode = -1
-    self.elapsed_time = 0
-    self.num_failures = 0
-    self.retries = 0
-    self.message = ''
-    self.cpu_estimated = 1
-    self.cpu_measured = 1
+
+    def __init__(self):
+        self.state = 'UNKNOWN'
+        self.returncode = -1
+        self.elapsed_time = 0
+        self.num_failures = 0
+        self.retries = 0
+        self.message = ''
+        self.cpu_estimated = 1
+        self.cpu_measured = 1
 
 
 def read_from_start(f):
-  f.seek(0)
-  return f.read()
+    f.seek(0)
+    return f.read()
 
 
 class Job(object):
-  """Manages one job."""
+    """Manages one job."""
 
-  def __init__(self, spec, newline_on_success, travis, add_env,
-               quiet_success=False):
-    self._spec = spec
-    self._newline_on_success = newline_on_success
-    self._travis = travis
-    self._add_env = add_env.copy()
-    self._retries = 0
-    self._timeout_retries = 0
-    self._suppress_failure_message = False
-    self._quiet_success = quiet_success
-    if not self._quiet_success:
-      message('START', spec.shortname, do_newline=self._travis)
-    self.result = JobResult()
-    self.start()
-
-  def GetSpec(self):
-    return self._spec
-
-  def start(self):
-    self._tempfile = tempfile.TemporaryFile()
-    env = dict(os.environ)
-    env.update(self._spec.environ)
-    env.update(self._add_env)
-    env = sanitized_environment(env)
-    self._start = time.time()
-    cmdline = self._spec.cmdline
-    # The Unix time command is finicky when used with MSBuild, so we don't use it
-    # with jobs that run MSBuild.
-    global measure_cpu_costs
-    if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
-      cmdline = ['time', '-p'] + cmdline
-    else:
-      measure_cpu_costs = False
-    try_start = lambda: subprocess.Popen(args=cmdline,
-                                         stderr=subprocess.STDOUT,
-                                         stdout=self._tempfile,
-                                         cwd=self._spec.cwd,
-                                         shell=self._spec.shell,
-                                         env=env)
-    delay = 0.3
-    for i in range(0, 4):
-      try:
-        self._process = try_start()
-        break
-      except OSError:
-        message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
-        time.sleep(delay)
-        delay *= 2
-    else:
-      self._process = try_start()
-    self._state = _RUNNING
-
-  def state(self):
-    """Poll current state of the job. Prints messages at completion."""
-    def stdout(self=self):
-      stdout = read_from_start(self._tempfile)
-      self.result.message = stdout[-_MAX_RESULT_SIZE:]
-      return stdout
-    if self._state == _RUNNING and self._process.poll() is not None:
-      elapsed = time.time() - self._start
-      self.result.elapsed_time = elapsed
-      if self._process.returncode != 0:
-        if self._retries < self._spec.flake_retries:
-          message('FLAKE', '%s [ret=%d, pid=%d]' % (
-            self._spec.shortname, self._process.returncode, self._process.pid),
-            stdout(), do_newline=True)
-          self._retries += 1
-          self.result.num_failures += 1
-          self.result.retries = self._timeout_retries + self._retries
-          # NOTE: job is restarted regardless of jobset's max_time setting
-          self.start()
-        else:
-          self._state = _FAILURE
-          if not self._suppress_failure_message:
-            message('FAILED', '%s [ret=%d, pid=%d, time=%.1fsec]' % (
-                self._spec.shortname, self._process.returncode, self._process.pid, elapsed),
-                stdout(), do_newline=True)
-          self.result.state = 'FAILED'
-          self.result.num_failures += 1
-          self.result.returncode = self._process.returncode
-      else:
-        self._state = _SUCCESS
-        measurement = ''
-        if measure_cpu_costs:
-          m = re.search(r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)', stdout())
-          real = float(m.group(1))
-          user = float(m.group(2))
-          sys = float(m.group(3))
-          if real > 0.5:
-            cores = (user + sys) / real
-            self.result.cpu_measured = float('%.01f' % cores)
-            self.result.cpu_estimated = float('%.01f' % self._spec.cpu_cost)
-            measurement = '; cpu_cost=%.01f; estimated=%.01f' % (self.result.cpu_measured, self.result.cpu_estimated)
+    def __init__(self,
+                 spec,
+                 newline_on_success,
+                 travis,
+                 add_env,
+                 quiet_success=False):
+        self._spec = spec
+        self._newline_on_success = newline_on_success
+        self._travis = travis
+        self._add_env = add_env.copy()
+        self._retries = 0
+        self._timeout_retries = 0
+        self._suppress_failure_message = False
+        self._quiet_success = quiet_success
         if not self._quiet_success:
-          message('PASSED', '%s [time=%.1fsec, retries=%d:%d%s]' % (
-              self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
-              stdout() if self._spec.verbose_success else None,
-              do_newline=self._newline_on_success or self._travis)
-        self.result.state = 'PASSED'
-    elif (self._state == _RUNNING and
-          self._spec.timeout_seconds is not None and
-          time.time() - self._start > self._spec.timeout_seconds):
-      elapsed = time.time() - self._start
-      self.result.elapsed_time = elapsed
-      if self._timeout_retries < self._spec.timeout_retries:
-        message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
-        self._timeout_retries += 1
-        self.result.num_failures += 1
-        self.result.retries = self._timeout_retries + self._retries
-        if self._spec.kill_handler:
-          self._spec.kill_handler(self)
-        self._process.terminate()
-        # NOTE: job is restarted regardless of jobset's max_time setting
+            message('START', spec.shortname, do_newline=self._travis)
+        self.result = JobResult()
         self.start()
-      else:
-        message('TIMEOUT', '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname, self._process.pid, elapsed), stdout(), do_newline=True)
-        self.kill()
-        self.result.state = 'TIMEOUT'
-        self.result.num_failures += 1
-    return self._state
 
-  def kill(self):
-    if self._state == _RUNNING:
-      self._state = _KILLED
-      if self._spec.kill_handler:
-        self._spec.kill_handler(self)
-      self._process.terminate()
+    def GetSpec(self):
+        return self._spec
 
-  def suppress_failure_message(self):
-    self._suppress_failure_message = True
+    def start(self):
+        self._tempfile = tempfile.TemporaryFile()
+        env = dict(os.environ)
+        env.update(self._spec.environ)
+        env.update(self._add_env)
+        env = sanitized_environment(env)
+        self._start = time.time()
+        cmdline = self._spec.cmdline
+        # The Unix time command is finicky when used with MSBuild, so we don't use it
+        # with jobs that run MSBuild.
+        global measure_cpu_costs
+        if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
+            cmdline = ['time', '-p'] + cmdline
+        else:
+            measure_cpu_costs = False
+        try_start = lambda: subprocess.Popen(args=cmdline,
+                                             stderr=subprocess.STDOUT,
+                                             stdout=self._tempfile,
+                                             cwd=self._spec.cwd,
+                                             shell=self._spec.shell,
+                                             env=env)
+        delay = 0.3
+        for i in range(0, 4):
+            try:
+                self._process = try_start()
+                break
+            except OSError:
+                message('WARNING', 'Failed to start %s, retrying in %f seconds'
+                        % (self._spec.shortname, delay))
+                time.sleep(delay)
+                delay *= 2
+        else:
+            self._process = try_start()
+        self._state = _RUNNING
+
+    def state(self):
+        """Poll current state of the job. Prints messages at completion."""
+
+        def stdout(self=self):
+            stdout = read_from_start(self._tempfile)
+            self.result.message = stdout[-_MAX_RESULT_SIZE:]
+            return stdout
+
+        if self._state == _RUNNING and self._process.poll() is not None:
+            elapsed = time.time() - self._start
+            self.result.elapsed_time = elapsed
+            if self._process.returncode != 0:
+                if self._retries < self._spec.flake_retries:
+                    message(
+                        'FLAKE',
+                        '%s [ret=%d, pid=%d]' %
+                        (self._spec.shortname, self._process.returncode,
+                         self._process.pid),
+                        stdout(),
+                        do_newline=True)
+                    self._retries += 1
+                    self.result.num_failures += 1
+                    self.result.retries = self._timeout_retries + self._retries
+                    # NOTE: job is restarted regardless of jobset's max_time setting
+                    self.start()
+                else:
+                    self._state = _FAILURE
+                    if not self._suppress_failure_message:
+                        message(
+                            'FAILED',
+                            '%s [ret=%d, pid=%d, time=%.1fsec]' %
+                            (self._spec.shortname, self._process.returncode,
+                             self._process.pid, elapsed),
+                            stdout(),
+                            do_newline=True)
+                    self.result.state = 'FAILED'
+                    self.result.num_failures += 1
+                    self.result.returncode = self._process.returncode
+            else:
+                self._state = _SUCCESS
+                measurement = ''
+                if measure_cpu_costs:
+                    m = re.search(
+                        r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
+                        stdout())
+                    real = float(m.group(1))
+                    user = float(m.group(2))
+                    sys = float(m.group(3))
+                    if real > 0.5:
+                        cores = (user + sys) / real
+                        self.result.cpu_measured = float('%.01f' % cores)
+                        self.result.cpu_estimated = float('%.01f' %
+                                                          self._spec.cpu_cost)
+                        measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
+                            self.result.cpu_measured, self.result.cpu_estimated)
+                if not self._quiet_success:
+                    message(
+                        'PASSED',
+                        '%s [time=%.1fsec, retries=%d:%d%s]' %
+                        (self._spec.shortname, elapsed, self._retries,
+                         self._timeout_retries, measurement),
+                        stdout() if self._spec.verbose_success else None,
+                        do_newline=self._newline_on_success or self._travis)
+                self.result.state = 'PASSED'
+        elif (self._state == _RUNNING and
+              self._spec.timeout_seconds is not None and
+              time.time() - self._start > self._spec.timeout_seconds):
+            elapsed = time.time() - self._start
+            self.result.elapsed_time = elapsed
+            if self._timeout_retries < self._spec.timeout_retries:
+                message(
+                    'TIMEOUT_FLAKE',
+                    '%s [pid=%d]' % (self._spec.shortname, self._process.pid),
+                    stdout(),
+                    do_newline=True)
+                self._timeout_retries += 1
+                self.result.num_failures += 1
+                self.result.retries = self._timeout_retries + self._retries
+                if self._spec.kill_handler:
+                    self._spec.kill_handler(self)
+                self._process.terminate()
+                # NOTE: job is restarted regardless of jobset's max_time setting
+                self.start()
+            else:
+                message(
+                    'TIMEOUT',
+                    '%s [pid=%d, time=%.1fsec]' %
+                    (self._spec.shortname, self._process.pid, elapsed),
+                    stdout(),
+                    do_newline=True)
+                self.kill()
+                self.result.state = 'TIMEOUT'
+                self.result.num_failures += 1
+        return self._state
+
+    def kill(self):
+        if self._state == _RUNNING:
+            self._state = _KILLED
+            if self._spec.kill_handler:
+                self._spec.kill_handler(self)
+            self._process.terminate()
+
+    def suppress_failure_message(self):
+        self._suppress_failure_message = True
 
 
 class Jobset(object):
-  """Manages one run of jobs."""
+    """Manages one run of jobs."""
 
-  def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic, newline_on_success, travis,
-               stop_on_failure, add_env, quiet_success, max_time):
-    self._running = set()
-    self._check_cancelled = check_cancelled
-    self._cancelled = False
-    self._failures = 0
-    self._completed = 0
-    self._maxjobs = maxjobs
-    self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
-    self._newline_on_success = newline_on_success
-    self._travis = travis
-    self._stop_on_failure = stop_on_failure
-    self._add_env = add_env
-    self._quiet_success = quiet_success
-    self._max_time = max_time
-    self.resultset = {}
-    self._remaining = None
-    self._start_time = time.time()
+    def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
+                 newline_on_success, travis, stop_on_failure, add_env,
+                 quiet_success, max_time):
+        self._running = set()
+        self._check_cancelled = check_cancelled
+        self._cancelled = False
+        self._failures = 0
+        self._completed = 0
+        self._maxjobs = maxjobs
+        self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
+        self._newline_on_success = newline_on_success
+        self._travis = travis
+        self._stop_on_failure = stop_on_failure
+        self._add_env = add_env
+        self._quiet_success = quiet_success
+        self._max_time = max_time
+        self.resultset = {}
+        self._remaining = None
+        self._start_time = time.time()
 
-  def set_remaining(self, remaining):
-    self._remaining = remaining
+    def set_remaining(self, remaining):
+        self._remaining = remaining
 
-  def get_num_failures(self):
-    return self._failures
+    def get_num_failures(self):
+        return self._failures
 
-  def cpu_cost(self):
-    c = 0
-    for job in self._running:
-      c += job._spec.cpu_cost
-    return c
+    def cpu_cost(self):
+        c = 0
+        for job in self._running:
+            c += job._spec.cpu_cost
+        return c
 
-  def start(self, spec):
-    """Start a job. Return True on success, False on failure."""
-    while True:
-      if self._max_time > 0 and time.time() - self._start_time > self._max_time:
-        skipped_job_result = JobResult()
-        skipped_job_result.state = 'SKIPPED'
-        message('SKIPPED', spec.shortname, do_newline=True)
-        self.resultset[spec.shortname] = [skipped_job_result]
+    def start(self, spec):
+        """Start a job. Return True on success, False on failure."""
+        while True:
+            if self._max_time > 0 and time.time(
+            ) - self._start_time > self._max_time:
+                skipped_job_result = JobResult()
+                skipped_job_result.state = 'SKIPPED'
+                message('SKIPPED', spec.shortname, do_newline=True)
+                self.resultset[spec.shortname] = [skipped_job_result]
+                return True
+            if self.cancelled(): return False
+            current_cpu_cost = self.cpu_cost()
+            if current_cpu_cost == 0: break
+            if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
+                if len(self._running) < self._maxjobs_cpu_agnostic:
+                    break
+            self.reap(spec.shortname, spec.cpu_cost)
+        if self.cancelled(): return False
+        job = Job(spec, self._newline_on_success, self._travis, self._add_env,
+                  self._quiet_success)
+        self._running.add(job)
+        if job.GetSpec().shortname not in self.resultset:
+            self.resultset[job.GetSpec().shortname] = []
         return True
-      if self.cancelled(): return False
-      current_cpu_cost = self.cpu_cost()
-      if current_cpu_cost == 0: break
-      if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
-        if len(self._running) < self._maxjobs_cpu_agnostic:
-          break
-      self.reap(spec.shortname, spec.cpu_cost)
-    if self.cancelled(): return False
-    job = Job(spec,
-              self._newline_on_success,
-              self._travis,
-              self._add_env,
-              self._quiet_success)
-    self._running.add(job)
-    if job.GetSpec().shortname not in self.resultset:
-      self.resultset[job.GetSpec().shortname] = []
-    return True
 
-  def reap(self, waiting_for=None, waiting_for_cost=None):
-    """Collect the dead jobs."""
-    while self._running:
-      dead = set()
-      for job in self._running:
-        st = eintr_be_gone(lambda: job.state())
-        if st == _RUNNING: continue
-        if st == _FAILURE or st == _KILLED:
-          self._failures += 1
-          if self._stop_on_failure:
-            self._cancelled = True
+    def reap(self, waiting_for=None, waiting_for_cost=None):
+        """Collect the dead jobs."""
+        while self._running:
+            dead = set()
             for job in self._running:
-              job.kill()
-        dead.add(job)
-        break
-      for job in dead:
-        self._completed += 1
-        if not self._quiet_success or job.result.state != 'PASSED':
-          self.resultset[job.GetSpec().shortname].append(job.result)
-        self._running.remove(job)
-      if dead: return
-      if not self._travis and platform_string() != 'windows':
-        rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
-        if self._remaining is not None and self._completed > 0:
-          now = time.time()
-          sofar = now - self._start_time
-          remaining = sofar / self._completed * (self._remaining + len(self._running))
-          rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
-        if waiting_for is not None:
-          wstr = ' next: %s @ %.2f cpu' % (waiting_for, waiting_for_cost)
-        else:
-          wstr = ''
-        message('WAITING', '%s%d jobs running, %d complete, %d failed (load %.2f)%s' % (
-            rstr, len(self._running), self._completed, self._failures, self.cpu_cost(), wstr))
-      if platform_string() == 'windows':
-        time.sleep(0.1)
-      else:
-        signal.alarm(10)
-        signal.pause()
+                st = eintr_be_gone(lambda: job.state())
+                if st == _RUNNING: continue
+                if st == _FAILURE or st == _KILLED:
+                    self._failures += 1
+                    if self._stop_on_failure:
+                        self._cancelled = True
+                        for job in self._running:
+                            job.kill()
+                dead.add(job)
+                break
+            for job in dead:
+                self._completed += 1
+                if not self._quiet_success or job.result.state != 'PASSED':
+                    self.resultset[job.GetSpec().shortname].append(job.result)
+                self._running.remove(job)
+            if dead: return
+            if not self._travis and platform_string() != 'windows':
+                rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
+                if self._remaining is not None and self._completed > 0:
+                    now = time.time()
+                    sofar = now - self._start_time
+                    remaining = sofar / self._completed * (
+                        self._remaining + len(self._running))
+                    rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
+                if waiting_for is not None:
+                    wstr = ' next: %s @ %.2f cpu' % (waiting_for,
+                                                     waiting_for_cost)
+                else:
+                    wstr = ''
+                message(
+                    'WAITING',
+                    '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
+                    (rstr, len(self._running), self._completed, self._failures,
+                     self.cpu_cost(), wstr))
+            if platform_string() == 'windows':
+                time.sleep(0.1)
+            else:
+                signal.alarm(10)
+                signal.pause()
 
-  def cancelled(self):
-    """Poll for cancellation."""
-    if self._cancelled: return True
-    if not self._check_cancelled(): return False
-    for job in self._running:
-      job.kill()
-    self._cancelled = True
-    return True
+    def cancelled(self):
+        """Poll for cancellation."""
+        if self._cancelled: return True
+        if not self._check_cancelled(): return False
+        for job in self._running:
+            job.kill()
+        self._cancelled = True
+        return True
 
-  def finish(self):
-    while self._running:
-      if self.cancelled(): pass  # poll cancellation
-      self.reap()
-    if platform_string() != 'windows':
-      signal.alarm(0)
-    return not self.cancelled() and self._failures == 0
+    def finish(self):
+        while self._running:
+            if self.cancelled(): pass  # poll cancellation
+            self.reap()
+        if platform_string() != 'windows':
+            signal.alarm(0)
+        return not self.cancelled() and self._failures == 0
 
 
 def _never_cancelled():
-  return False
+    return False
 
 
 def tag_remaining(xs):
-  staging = []
-  for x in xs:
-    staging.append(x)
-    if len(staging) > 5000:
-      yield (staging.pop(0), None)
-  n = len(staging)
-  for i, x in enumerate(staging):
-    yield (x, n - i - 1)
+    staging = []
+    for x in xs:
+        staging.append(x)
+        if len(staging) > 5000:
+            yield (staging.pop(0), None)
+    n = len(staging)
+    for i, x in enumerate(staging):
+        yield (x, n - i - 1)
 
 
 def run(cmdlines,
@@ -511,23 +548,23 @@
         skip_jobs=False,
         quiet_success=False,
         max_time=-1):
-  if skip_jobs:
-    resultset = {}
-    skipped_job_result = JobResult()
-    skipped_job_result.state = 'SKIPPED'
-    for job in cmdlines:
-      message('SKIPPED', job.shortname, do_newline=True)
-      resultset[job.shortname] = [skipped_job_result]
-    return 0, resultset
-  js = Jobset(check_cancelled,
-              maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
-              maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
-              newline_on_success, travis, stop_on_failure, add_env,
-              quiet_success, max_time)
-  for cmdline, remaining in tag_remaining(cmdlines):
-    if not js.start(cmdline):
-      break
-    if remaining is not None:
-      js.set_remaining(remaining)
-  js.finish()
-  return js.get_num_failures(), js.resultset
+    if skip_jobs:
+        resultset = {}
+        skipped_job_result = JobResult()
+        skipped_job_result.state = 'SKIPPED'
+        for job in cmdlines:
+            message('SKIPPED', job.shortname, do_newline=True)
+            resultset[job.shortname] = [skipped_job_result]
+        return 0, resultset
+    js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
+                _DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
+                if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
+                newline_on_success, travis, stop_on_failure, add_env,
+                quiet_success, max_time)
+    for cmdline, remaining in tag_remaining(cmdlines):
+        if not js.start(cmdline):
+            break
+        if remaining is not None:
+            js.set_remaining(remaining)
+    js.finish()
+    return js.get_num_failures(), js.resultset
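For orientation only (not part of this patch): a minimal sketch of driving the reformatted jobset module from a test runner. It assumes jobset is importable and that run()'s elided parameters (check_cancelled, maxjobs, newline_on_success, ...) are keyword arguments with defaults, as the Jobset constructor above suggests.

    import jobset

    specs = [
        jobset.JobSpec(
            ['python', '-c', 'print("hello")'],  # cmdline: argv list for the child process
            shortname='hello_job',               # label used in START/PASSED/FAILED messages
            timeout_seconds=30,                  # job is timed out after this many seconds
            flake_retries=1,                     # restart once on a non-zero exit before failing
            cpu_cost=1.0),                       # number of cores per second this job needs
    ]

    # run() returns (num_failures, resultset); resultset maps each shortname to a
    # list of JobResult objects carrying state, returncode, elapsed_time and message.
    num_failures, resultset = jobset.run(
        specs, maxjobs=2, newline_on_success=True)
    print('%d failure(s)' % num_failures)
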
diff --git a/tools/run_tests/python_utils/port_server.py b/tools/run_tests/python_utils/port_server.py
index e8ac71a..83e09c0 100755
--- a/tools/run_tests/python_utils/port_server.py
+++ b/tools/run_tests/python_utils/port_server.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Manage TCP ports for unit tests; started by run_tests.py"""
 
 import argparse
@@ -27,17 +26,14 @@
 import threading
 import platform
 
-
 # increment this number whenever making a change to ensure that
 # the changes are picked up by running CI servers
 # note that all changes must be backwards compatible
 _MY_VERSION = 20
 
-
 if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
-  print _MY_VERSION
-  sys.exit(0)
-
+    print _MY_VERSION
+    sys.exit(0)
 
 argp = argparse.ArgumentParser(description='Server for httpcli_test')
 argp.add_argument('-p', '--port', default=12345, type=int)
@@ -45,11 +41,11 @@
 args = argp.parse_args()
 
 if args.logfile is not None:
-  sys.stdin.close()
-  sys.stderr.close()
-  sys.stdout.close()
-  sys.stderr = open(args.logfile, 'w')
-  sys.stdout = sys.stderr
+    sys.stdin.close()
+    sys.stderr.close()
+    sys.stdout.close()
+    sys.stderr = open(args.logfile, 'w')
+    sys.stdout = sys.stderr
 
 print 'port server running on port %d' % args.port
 
@@ -61,74 +57,81 @@
 # https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
 # ports is used in a Cronet test, the test would fail (see issue #12149). These
 # ports must be excluded from pool.
-cronet_restricted_ports = [1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37,
-                           42, 43, 53, 77, 79, 87, 95, 101, 102, 103, 104, 109,
-                           110, 111, 113, 115, 117, 119, 123, 135, 139, 143,
-                           179, 389, 465, 512, 513, 514, 515, 526, 530, 531,
-                           532, 540, 556, 563, 587, 601, 636, 993, 995, 2049,
-                           3659, 4045, 6000, 6665, 6666, 6667, 6668, 6669, 6697]
+cronet_restricted_ports = [
+    1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87,
+    95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139,
+    143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563,
+    587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668,
+    6669, 6697
+]
+
 
 def can_connect(port):
-  # this test is only really useful on unices where SO_REUSE_PORT is available
-  # so on Windows, where this test is expensive, skip it
-  if platform.system() == 'Windows': return False
-  s = socket.socket()
-  try:
-    s.connect(('localhost', port))
-    return True
-  except socket.error, e:
-    return False
-  finally:
-    s.close()
+    # this test is only really useful on unices where SO_REUSE_PORT is available
+    # so on Windows, where this test is expensive, skip it
+    if platform.system() == 'Windows': return False
+    s = socket.socket()
+    try:
+        s.connect(('localhost', port))
+        return True
+    except socket.error, e:
+        return False
+    finally:
+        s.close()
+
 
 def can_bind(port, proto):
-  s = socket.socket(proto, socket.SOCK_STREAM)
-  s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-  try:
-    s.bind(('localhost', port))
-    return True
-  except socket.error, e:
-    return False
-  finally:
-    s.close()
+    s = socket.socket(proto, socket.SOCK_STREAM)
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    try:
+        s.bind(('localhost', port))
+        return True
+    except socket.error, e:
+        return False
+    finally:
+        s.close()
 
 
 def refill_pool(max_timeout, req):
-  """Scan for ports not marked for being in use"""
-  chk = [port for port in list(range(1025, 32766)) if port not in cronet_restricted_ports]
-  random.shuffle(chk)
-  for i in chk:
-    if len(pool) > 100: break
-    if i in in_use:
-      age = time.time() - in_use[i]
-      if age < max_timeout:
-        continue
-      req.log_message("kill old request %d" % i)
-      del in_use[i]
-    if can_bind(i, socket.AF_INET) and can_bind(i, socket.AF_INET6) and not can_connect(i):
-      req.log_message("found available port %d" % i)
-      pool.append(i)
+    """Scan for ports not marked for being in use"""
+    chk = [
+        port for port in list(range(1025, 32766))
+        if port not in cronet_restricted_ports
+    ]
+    random.shuffle(chk)
+    for i in chk:
+        if len(pool) > 100: break
+        if i in in_use:
+            age = time.time() - in_use[i]
+            if age < max_timeout:
+                continue
+            req.log_message("kill old request %d" % i)
+            del in_use[i]
+        if can_bind(i, socket.AF_INET) and can_bind(
+                i, socket.AF_INET6) and not can_connect(i):
+            req.log_message("found available port %d" % i)
+            pool.append(i)
 
 
 def allocate_port(req):
-  global pool
-  global in_use
-  global mu
-  mu.acquire()
-  max_timeout = 600
-  while not pool:
-    refill_pool(max_timeout, req)
-    if not pool:
-      req.log_message("failed to find ports: retrying soon")
-      mu.release()
-      time.sleep(1)
-      mu.acquire()
-      max_timeout /= 2
-  port = pool[0]
-  pool = pool[1:]
-  in_use[port] = time.time()
-  mu.release()
-  return port
+    global pool
+    global in_use
+    global mu
+    mu.acquire()
+    max_timeout = 600
+    while not pool:
+        refill_pool(max_timeout, req)
+        if not pool:
+            req.log_message("failed to find ports: retrying soon")
+            mu.release()
+            time.sleep(1)
+            mu.acquire()
+            max_timeout /= 2
+    port = pool[0]
+    pool = pool[1:]
+    in_use[port] = time.time()
+    mu.release()
+    return port
 
 
 keep_running = True
@@ -136,61 +139,68 @@
 
 class Handler(BaseHTTPRequestHandler):
 
-  def setup(self):
-    # If the client is unreachable for 5 seconds, close the connection
-    self.timeout = 5
-    BaseHTTPRequestHandler.setup(self)
+    def setup(self):
+        # If the client is unreachable for 5 seconds, close the connection
+        self.timeout = 5
+        BaseHTTPRequestHandler.setup(self)
 
-  def do_GET(self):
-    global keep_running
-    global mu
-    if self.path == '/get':
-      # allocate a new port, it will stay bound for ten minutes and until
-      # it's unused
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      p = allocate_port(self)
-      self.log_message('allocated port %d' % p)
-      self.wfile.write('%d' % p)
-    elif self.path[0:6] == '/drop/':
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      p = int(self.path[6:])
-      mu.acquire()
-      if p in in_use:
-        del in_use[p]
-        pool.append(p)
-        k = 'known'
-      else:
-        k = 'unknown'
-      mu.release()
-      self.log_message('drop %s port %d' % (k, p))
-    elif self.path == '/version_number':
-      # fetch a version string and the current process pid
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      self.wfile.write(_MY_VERSION)
-    elif self.path == '/dump':
-      # yaml module is not installed on Macs and Windows machines by default
-      # so we import it lazily (/dump action is only used for debugging)
-      import yaml
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      mu.acquire()
-      now = time.time()
-      out = yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())})
-      mu.release()
-      self.wfile.write(out)
-    elif self.path == '/quitquitquit':
-      self.send_response(200)
-      self.end_headers()
-      self.server.shutdown()
+    def do_GET(self):
+        global keep_running
+        global mu
+        if self.path == '/get':
+            # allocate a new port, it will stay bound for ten minutes and until
+            # it's unused
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            p = allocate_port(self)
+            self.log_message('allocated port %d' % p)
+            self.wfile.write('%d' % p)
+        elif self.path[0:6] == '/drop/':
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            p = int(self.path[6:])
+            mu.acquire()
+            if p in in_use:
+                del in_use[p]
+                pool.append(p)
+                k = 'known'
+            else:
+                k = 'unknown'
+            mu.release()
+            self.log_message('drop %s port %d' % (k, p))
+        elif self.path == '/version_number':
+            # fetch a version string and the current process pid
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            self.wfile.write(_MY_VERSION)
+        elif self.path == '/dump':
+            # yaml module is not installed on Macs and Windows machines by default
+            # so we import it lazily (/dump action is only used for debugging)
+            import yaml
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            mu.acquire()
+            now = time.time()
+            out = yaml.dump({
+                'pool':
+                pool,
+                'in_use':
+                dict((k, now - v) for k, v in in_use.items())
+            })
+            mu.release()
+            self.wfile.write(out)
+        elif self.path == '/quitquitquit':
+            self.send_response(200)
+            self.end_headers()
+            self.server.shutdown()
+
 
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
-  """Handle requests in a separate thread"""
+    """Handle requests in a separate thread"""
+
 
 ThreadedHTTPServer(('', args.port), Handler).serve_forever()
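The Handler class above defines the port server's entire HTTP protocol: /get reserves a port, /drop/<port> returns it to the pool, /version_number reports _MY_VERSION, /dump is a debugging view, and /quitquitquit shuts the server down. A rough client-side sketch, in the same Python 2 urllib style as start_port_server.py below (illustrative only; the ten-minute reservation comes from the max_timeout logic in allocate_port/refill_pool, and the port number from _PORT_SERVER_PORT in start_port_server.py):

    import urllib

    _PORT_SERVER = 'http://localhost:32766'  # args.port defaults to 12345; the test harness uses 32766

    # reserve a port; the server keeps it out of the pool for up to ten minutes
    # or until it is dropped
    port = int(urllib.urlopen(_PORT_SERVER + '/get').read())
    print 'allocated port %d' % port

    # ... run a test against that port ...

    # hand the port back so it can be reused immediately
    urllib.urlopen('%s/drop/%d' % (_PORT_SERVER, port)).read()
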
diff --git a/tools/run_tests/python_utils/report_utils.py b/tools/run_tests/python_utils/report_utils.py
index a386780..e4fddb8 100644
--- a/tools/run_tests/python_utils/report_utils.py
+++ b/tools/run_tests/python_utils/report_utils.py
@@ -11,17 +11,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Generate XML and HTML test reports."""
 
 from __future__ import print_function
 
 try:
-  from mako.runtime import Context
-  from mako.template import Template
-  from mako import exceptions
+    from mako.runtime import Context
+    from mako.template import Template
+    from mako import exceptions
 except (ImportError):
-  pass  # Mako not installed but it is ok.
+    pass  # Mako not installed but it is ok.
 import datetime
 import os
 import string
@@ -30,111 +29,127 @@
 
 
 def _filter_msg(msg, output_format):
-  """Filters out nonprintable and illegal characters from the message."""
-  if output_format in ['XML', 'HTML']:
-    # keep whitespaces but remove formfeed and vertical tab characters
-    # that make XML report unparseable.
-    filtered_msg = filter(
-        lambda x: x in string.printable and x != '\f' and x != '\v',
-        msg.decode('UTF-8', 'ignore'))
-    if output_format == 'HTML':
-      filtered_msg = filtered_msg.replace('"', '&quot;')
-    return filtered_msg
-  else:
-    return msg
+    """Filters out nonprintable and illegal characters from the message."""
+    if output_format in ['XML', 'HTML']:
+        # keep whitespaces but remove formfeed and vertical tab characters
+        # that make XML report unparseable.
+        filtered_msg = filter(
+            lambda x: x in string.printable and x != '\f' and x != '\v',
+            msg.decode('UTF-8', 'ignore'))
+        if output_format == 'HTML':
+            filtered_msg = filtered_msg.replace('"', '&quot;')
+        return filtered_msg
+    else:
+        return msg
 
 
 def new_junit_xml_tree():
-  return ET.ElementTree(ET.Element('testsuites'))
+    return ET.ElementTree(ET.Element('testsuites'))
 
-def render_junit_xml_report(resultset, report_file, suite_package='grpc',
+
+def render_junit_xml_report(resultset,
+                            report_file,
+                            suite_package='grpc',
                             suite_name='tests'):
-  """Generate JUnit-like XML report."""
-  tree = new_junit_xml_tree()
-  append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
-  create_xml_report_file(tree, report_file)
+    """Generate JUnit-like XML report."""
+    tree = new_junit_xml_tree()
+    append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
+    create_xml_report_file(tree, report_file)
+
 
 def create_xml_report_file(tree, report_file):
-  """Generate JUnit-like report file from xml tree ."""
-  # ensure the report directory exists
-  report_dir = os.path.dirname(os.path.abspath(report_file))
-  if not os.path.exists(report_dir):
-    os.makedirs(report_dir)
-  tree.write(report_file, encoding='UTF-8')
+    """Generate JUnit-like report file from xml tree ."""
+    # ensure the report directory exists
+    report_dir = os.path.dirname(os.path.abspath(report_file))
+    if not os.path.exists(report_dir):
+        os.makedirs(report_dir)
+    tree.write(report_file, encoding='UTF-8')
+
 
 def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
-  """Append a JUnit-like XML report tree with test results as a new suite."""
-  testsuite = ET.SubElement(tree.getroot(), 'testsuite',
-                            id=id, package=suite_package, name=suite_name,
-                            timestamp=datetime.datetime.now().isoformat())
-  failure_count  = 0
-  error_count = 0
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
-      if result.elapsed_time:
-        xml_test.set('time', str(result.elapsed_time))
-      filtered_msg =  _filter_msg(result.message, 'XML')
-      if result.state == 'FAILED':
-        ET.SubElement(xml_test, 'failure', message='Failure').text = filtered_msg
-        failure_count += 1
-      elif result.state == 'TIMEOUT':
-        ET.SubElement(xml_test, 'error', message='Timeout').text = filtered_msg
-        error_count += 1
-      elif result.state == 'SKIPPED':
-        ET.SubElement(xml_test, 'skipped', message='Skipped')
-  testsuite.set('failures', str(failure_count))
-  testsuite.set('errors', str(error_count))
+    """Append a JUnit-like XML report tree with test results as a new suite."""
+    testsuite = ET.SubElement(
+        tree.getroot(),
+        'testsuite',
+        id=id,
+        package=suite_package,
+        name=suite_name,
+        timestamp=datetime.datetime.now().isoformat())
+    failure_count = 0
+    error_count = 0
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
+            if result.elapsed_time:
+                xml_test.set('time', str(result.elapsed_time))
+            filtered_msg = _filter_msg(result.message, 'XML')
+            if result.state == 'FAILED':
+                ET.SubElement(
+                    xml_test, 'failure', message='Failure').text = filtered_msg
+                failure_count += 1
+            elif result.state == 'TIMEOUT':
+                ET.SubElement(
+                    xml_test, 'error', message='Timeout').text = filtered_msg
+                error_count += 1
+            elif result.state == 'SKIPPED':
+                ET.SubElement(xml_test, 'skipped', message='Skipped')
+    testsuite.set('failures', str(failure_count))
+    testsuite.set('errors', str(error_count))
 
-def render_interop_html_report(
-  client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
-  http2_server_cases, resultset,
-  num_failures, cloud_to_prod, prod_servers, http2_interop):
-  """Generate HTML report for interop tests."""
-  template_file = 'tools/run_tests/interop/interop_html_report.template'
-  try:
-    mytemplate = Template(filename=template_file, format_exceptions=True)
-  except NameError:
-    print('Mako template is not installed. Skipping HTML report generation.')
-    return
-  except IOError as e:
-    print('Failed to find the template %s: %s' % (template_file, e))
-    return
 
-  sorted_test_cases = sorted(test_cases)
-  sorted_auth_test_cases = sorted(auth_test_cases)
-  sorted_http2_cases = sorted(http2_cases)
-  sorted_http2_server_cases = sorted(http2_server_cases)
-  sorted_client_langs = sorted(client_langs)
-  sorted_server_langs = sorted(server_langs)
-  sorted_prod_servers = sorted(prod_servers)
+def render_interop_html_report(client_langs, server_langs, test_cases,
+                               auth_test_cases, http2_cases, http2_server_cases,
+                               resultset, num_failures, cloud_to_prod,
+                               prod_servers, http2_interop):
+    """Generate HTML report for interop tests."""
+    template_file = 'tools/run_tests/interop/interop_html_report.template'
+    try:
+        mytemplate = Template(filename=template_file, format_exceptions=True)
+    except NameError:
+        print(
+            'Mako template is not installed. Skipping HTML report generation.')
+        return
+    except IOError as e:
+        print('Failed to find the template %s: %s' % (template_file, e))
+        return
 
-  args = {'client_langs': sorted_client_langs,
-          'server_langs': sorted_server_langs,
-          'test_cases': sorted_test_cases,
-          'auth_test_cases': sorted_auth_test_cases,
-          'http2_cases': sorted_http2_cases,
-          'http2_server_cases': sorted_http2_server_cases,
-          'resultset': resultset,
-          'num_failures': num_failures,
-          'cloud_to_prod': cloud_to_prod,
-          'prod_servers': sorted_prod_servers,
-          'http2_interop': http2_interop}
+    sorted_test_cases = sorted(test_cases)
+    sorted_auth_test_cases = sorted(auth_test_cases)
+    sorted_http2_cases = sorted(http2_cases)
+    sorted_http2_server_cases = sorted(http2_server_cases)
+    sorted_client_langs = sorted(client_langs)
+    sorted_server_langs = sorted(server_langs)
+    sorted_prod_servers = sorted(prod_servers)
 
-  html_report_out_dir = 'reports'
-  if not os.path.exists(html_report_out_dir):
-    os.mkdir(html_report_out_dir)
-  html_file_path = os.path.join(html_report_out_dir, 'index.html')
-  try:
-    with open(html_file_path, 'w') as output_file:
-      mytemplate.render_context(Context(output_file, **args))
-  except:
-    print(exceptions.text_error_template().render())
-    raise
+    args = {
+        'client_langs': sorted_client_langs,
+        'server_langs': sorted_server_langs,
+        'test_cases': sorted_test_cases,
+        'auth_test_cases': sorted_auth_test_cases,
+        'http2_cases': sorted_http2_cases,
+        'http2_server_cases': sorted_http2_server_cases,
+        'resultset': resultset,
+        'num_failures': num_failures,
+        'cloud_to_prod': cloud_to_prod,
+        'prod_servers': sorted_prod_servers,
+        'http2_interop': http2_interop
+    }
+
+    html_report_out_dir = 'reports'
+    if not os.path.exists(html_report_out_dir):
+        os.mkdir(html_report_out_dir)
+    html_file_path = os.path.join(html_report_out_dir, 'index.html')
+    try:
+        with open(html_file_path, 'w') as output_file:
+            mytemplate.render_context(Context(output_file, **args))
+    except:
+        print(exceptions.text_error_template().render())
+        raise
+
 
 def render_perf_profiling_results(output_filepath, profile_names):
-  with open(output_filepath, 'w') as output_file:
-    output_file.write('<ul>\n')
-    for name in profile_names:
-      output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
-    output_file.write('</ul>\n')
+    with open(output_filepath, 'w') as output_file:
+        output_file.write('<ul>\n')
+        for name in profile_names:
+            output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
+        output_file.write('</ul>\n')
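For context, these report_utils helpers consume the resultset dict that jobset.run() returns. A hedged sketch of producing a JUnit-style XML file from such a resultset (illustrative only; it assumes jobset and report_utils are both on the import path):

    import jobset
    import report_utils

    # build a one-entry resultset by hand: shortname -> list of JobResult objects
    result = jobset.JobResult()
    result.state = 'PASSED'
    result.elapsed_time = 1.5
    result.message = 'ok'
    resultset = {'example_test': [result]}

    # writes a <testsuites>/<testsuite>/<testcase> tree to report.xml, creating
    # the report directory first if it does not exist
    report_utils.render_junit_xml_report(resultset, 'report.xml')
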
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 786103c..5572cdc 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -22,10 +22,10 @@
 import tempfile
 import time
 
-
 # must be synchronized with test/core/utils/port_server_client.h
 _PORT_SERVER_PORT = 32766
 
+
 def start_port_server():
     # check if a compatible port server is running
     # if incompatible (version mismatch) ==> start a new one
@@ -33,9 +33,8 @@
     # otherwise, leave it up
     try:
         version = int(
-            urllib.urlopen(
-                'http://localhost:%d/version_number' %
-                _PORT_SERVER_PORT).read())
+            urllib.urlopen('http://localhost:%d/version_number' %
+                           _PORT_SERVER_PORT).read())
         logging.info('detected port server running version %d', version)
         running = True
     except Exception as e:
@@ -92,8 +91,8 @@
                 # try one final time: maybe another build managed to start one
                 time.sleep(1)
                 try:
-                    urllib.urlopen(
-                        'http://localhost:%d/get' % _PORT_SERVER_PORT).read()
+                    urllib.urlopen('http://localhost:%d/get' %
+                                   _PORT_SERVER_PORT).read()
                     logging.info(
                         'last ditch attempt to contact port server succeeded')
                     break
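start_port_server.py above keys off the /version_number handshake: port_server.py increments _MY_VERSION (20 at this revision) on every change, and a running server that reports a different number is treated as incompatible and replaced. A small sketch of that detection step (illustrative; the helper name is made up, only the port and endpoint come from the code above):

    import urllib

    _PORT_SERVER_PORT = 32766  # kept in sync with test/core/utils/port_server_client.h


    def _detect_port_server_version():
        """Return the version a running port server reports, or None if none is up."""
        try:
            return int(
                urllib.urlopen('http://localhost:%d/version_number' %
                               _PORT_SERVER_PORT).read())
        except Exception:
            return None

    # A value different from port_server._MY_VERSION means the running server is
    # stale and must be replaced; the /quitquitquit endpoint above exists for that
    # kind of controlled shutdown.
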
diff --git a/tools/run_tests/python_utils/upload_test_results.py b/tools/run_tests/python_utils/upload_test_results.py
index ea97bc0..9eb8e2a 100644
--- a/tools/run_tests/python_utils/upload_test_results.py
+++ b/tools/run_tests/python_utils/upload_test_results.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helper to upload Jenkins test results to BQ"""
 
 from __future__ import print_function
@@ -23,8 +22,8 @@
 import time
 import uuid
 
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
@@ -35,55 +34,57 @@
 _PARTITION_TYPE = 'DAY'
 _PROJECT_ID = 'grpc-testing'
 _RESULTS_SCHEMA = [
-  ('job_name', 'STRING', 'Name of Jenkins job'),
-  ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
-  ('build_url', 'STRING', 'URL of Jenkins job'),
-  ('test_name', 'STRING', 'Individual test name'),
-  ('language', 'STRING', 'Language of test'),
-  ('platform', 'STRING', 'Platform used for test'),
-  ('config', 'STRING', 'Config used for test'),
-  ('compiler', 'STRING', 'Compiler used for test'),
-  ('iomgr_platform', 'STRING', 'Iomgr used for test'),
-  ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
-  ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
-  ('elapsed_time', 'FLOAT', 'How long test took to run'),
-  ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
-  ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
-  ('return_code', 'INTEGER', 'Exit code of test'),
+    ('job_name', 'STRING', 'Name of Jenkins job'),
+    ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
+    ('build_url', 'STRING', 'URL of Jenkins job'),
+    ('test_name', 'STRING', 'Individual test name'),
+    ('language', 'STRING', 'Language of test'),
+    ('platform', 'STRING', 'Platform used for test'),
+    ('config', 'STRING', 'Config used for test'),
+    ('compiler', 'STRING', 'Compiler used for test'),
+    ('iomgr_platform', 'STRING', 'Iomgr used for test'),
+    ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+    ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+    ('elapsed_time', 'FLOAT', 'How long test took to run'),
+    ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
+    ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
+    ('return_code', 'INTEGER', 'Exit code of test'),
 ]
 _INTEROP_RESULTS_SCHEMA = [
-  ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
-  ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
-  ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
-  ('test_name', 'STRING', 'Unique test name combining client, server, and test_name'),
-  ('suite', 'STRING', 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
-  ('client', 'STRING', 'Client language'),
-  ('server', 'STRING', 'Server host name'),
-  ('test_case', 'STRING', 'Name of test case'),
-  ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
-  ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
-  ('elapsed_time', 'FLOAT', 'How long test took to run'),
+    ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
+    ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
+    ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
+    ('test_name', 'STRING',
+     'Unique test name combining client, server, and test_name'),
+    ('suite', 'STRING',
+     'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
+    ('client', 'STRING', 'Client language'),
+    ('server', 'STRING', 'Server host name'),
+    ('test_case', 'STRING', 'Name of test case'),
+    ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+    ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+    ('elapsed_time', 'FLOAT', 'How long test took to run'),
 ]
 
 
 def _get_build_metadata(test_results):
-  """Add Jenkins/Kokoro build metadata to test_results based on environment
+    """Add Jenkins/Kokoro build metadata to test_results based on environment
   variables set by Jenkins/Kokoro.
   """
-  build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
-  build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
-  job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
+    build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
+    build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
+    job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
 
-  if build_id:
-    test_results['build_id'] = build_id
-  if build_url:
-    test_results['build_url'] = build_url
-  if job_name:
-    test_results['job_name'] = job_name
+    if build_id:
+        test_results['build_id'] = build_id
+    if build_url:
+        test_results['build_url'] = build_url
+    if job_name:
+        test_results['job_name'] = job_name
 
 
 def upload_results_to_bq(resultset, bq_table, args, platform):
-  """Upload test results to a BQ table.
+    """Upload test results to a BQ table.
 
   Args:
       resultset: dictionary generated by jobset.run
@@ -91,77 +92,97 @@
       args: args in run_tests.py, generated by argparse
       platform: string name of platform tests were run on
   """
-  bq = big_query_utils.create_big_query()
-  big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
-                                           partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
+    bq = big_query_utils.create_big_query()
+    big_query_utils.create_partitioned_table(
+        bq,
+        _PROJECT_ID,
+        _DATASET_ID,
+        bq_table,
+        _RESULTS_SCHEMA,
+        _DESCRIPTION,
+        partition_type=_PARTITION_TYPE,
+        expiration_ms=_EXPIRATION_MS)
 
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      test_results = {}
-      _get_build_metadata(test_results)
-      test_results['compiler'] = args.compiler
-      test_results['config'] = args.config
-      test_results['cpu_estimated'] = result.cpu_estimated
-      test_results['cpu_measured'] = result.cpu_measured
-      test_results['elapsed_time'] = '%.2f' % result.elapsed_time
-      test_results['iomgr_platform'] = args.iomgr_platform
-      # args.language is a list, but will always have one element in the contexts
-      # this function is used.
-      test_results['language'] = args.language[0]
-      test_results['platform'] = platform
-      test_results['result'] = result.state
-      test_results['return_code'] = result.returncode
-      test_results['test_name'] = shortname
-      test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            test_results = {}
+            _get_build_metadata(test_results)
+            test_results['compiler'] = args.compiler
+            test_results['config'] = args.config
+            test_results['cpu_estimated'] = result.cpu_estimated
+            test_results['cpu_measured'] = result.cpu_measured
+            test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+            test_results['iomgr_platform'] = args.iomgr_platform
+            # args.language is a list, but it will always have exactly one
+            # element in the contexts in which this function is used.
+            test_results['language'] = args.language[0]
+            test_results['platform'] = platform
+            test_results['result'] = result.state
+            test_results['return_code'] = result.returncode
+            test_results['test_name'] = shortname
+            test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
 
-      row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+            row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
 
-      # TODO(jtattermusch): rows are inserted one by one, very inefficient
-      max_retries = 3
-      for attempt in range(max_retries):
-        if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
-          break
-        else:
-          if attempt < max_retries - 1:
-            print('Error uploading result to bigquery, will retry.')
-          else:
-            print('Error uploading result to bigquery, all attempts failed.')
-            sys.exit(1)
+            # TODO(jtattermusch): rows are inserted one by one, very inefficient
+            max_retries = 3
+            for attempt in range(max_retries):
+                if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                               bq_table, [row]):
+                    break
+                else:
+                    if attempt < max_retries - 1:
+                        print('Error uploading result to bigquery, will retry.')
+                    else:
+                        print(
+                            'Error uploading result to bigquery, all attempts failed.'
+                        )
+                        sys.exit(1)
 
 
 def upload_interop_results_to_bq(resultset, bq_table, args):
-  """Upload interop test results to a BQ table.
+    """Upload interop test results to a BQ table.
 
   Args:
       resultset: dictionary generated by jobset.run
       bq_table: string name of table to create/upload results to in BQ
       args: args in run_interop_tests.py, generated by argparse
   """
-  bq = big_query_utils.create_big_query()
-  big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _INTEROP_RESULTS_SCHEMA, _DESCRIPTION,
-                                           partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
+    bq = big_query_utils.create_big_query()
+    big_query_utils.create_partitioned_table(
+        bq,
+        _PROJECT_ID,
+        _DATASET_ID,
+        bq_table,
+        _INTEROP_RESULTS_SCHEMA,
+        _DESCRIPTION,
+        partition_type=_PARTITION_TYPE,
+        expiration_ms=_EXPIRATION_MS)
 
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      test_results = {}
-      _get_build_metadata(test_results)
-      test_results['elapsed_time'] = '%.2f' % result.elapsed_time
-      test_results['result'] = result.state
-      test_results['test_name'] = shortname
-      test_results['suite'] = shortname.split(':')[0]
-      test_results['client'] = shortname.split(':')[1]
-      test_results['server'] = shortname.split(':')[2]
-      test_results['test_case'] = shortname.split(':')[3]
-      test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
-      row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
-      # TODO(jtattermusch): rows are inserted one by one, very inefficient
-      max_retries = 3
-      for attempt in range(max_retries):
-        if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
-          break
-        else:
-          if attempt < max_retries - 1:
-            print('Error uploading result to bigquery, will retry.')
-          else:
-            print('Error uploading result to bigquery, all attempts failed.')
-            sys.exit(1)
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            test_results = {}
+            _get_build_metadata(test_results)
+            test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+            test_results['result'] = result.state
+            test_results['test_name'] = shortname
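+            # Interop job shortnames have the form
+            # '<suite>:<client>:<server>:<test_case>'.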
+            test_results['suite'] = shortname.split(':')[0]
+            test_results['client'] = shortname.split(':')[1]
+            test_results['server'] = shortname.split(':')[2]
+            test_results['test_case'] = shortname.split(':')[3]
+            test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+            row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+            # TODO(jtattermusch): rows are inserted one by one, very inefficient
+            max_retries = 3
+            for attempt in range(max_retries):
+                if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                               bq_table, [row]):
+                    break
+                else:
+                    if attempt < max_retries - 1:
+                        print('Error uploading result to bigquery, will retry.')
+                    else:
+                        print(
+                            'Error uploading result to bigquery, all attempts failed.'
+                        )
+                        sys.exit(1)
diff --git a/tools/run_tests/python_utils/watch_dirs.py b/tools/run_tests/python_utils/watch_dirs.py
index 7bd085e..d2ad303 100755
--- a/tools/run_tests/python_utils/watch_dirs.py
+++ b/tools/run_tests/python_utils/watch_dirs.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helper to watch a (set) of directories for modifications."""
 
 import os
@@ -19,42 +18,42 @@
 
 
 class DirWatcher(object):
-  """Helper to watch a (set) of directories for modifications."""
+    """Helper to watch a (set) of directories for modifications."""
 
-  def __init__(self, paths):
-    if isinstance(paths, basestring):
-      paths = [paths]
-    self._done = False
-    self.paths = list(paths)
-    self.lastrun = time.time()
-    self._cache = self._calculate()
+    def __init__(self, paths):
+        if isinstance(paths, basestring):
+            paths = [paths]
+        self._done = False
+        self.paths = list(paths)
+        self.lastrun = time.time()
+        self._cache = self._calculate()
 
-  def _calculate(self):
-    """Walk over all subscribed paths, check most recent mtime."""
-    most_recent_change = None
-    for path in self.paths:
-      if not os.path.exists(path):
-        continue
-      if not os.path.isdir(path):
-        continue
-      for root, _, files in os.walk(path):
-        for f in files:
-          if f and f[0] == '.': continue
-          try:
-            st = os.stat(os.path.join(root, f))
-          except OSError as e:
-            if e.errno == os.errno.ENOENT:
-              continue
-            raise
-          if most_recent_change is None:
-            most_recent_change = st.st_mtime
-          else:
-            most_recent_change = max(most_recent_change, st.st_mtime)
-    return most_recent_change
+    def _calculate(self):
+        """Walk over all subscribed paths, check most recent mtime."""
+        most_recent_change = None
+        for path in self.paths:
+            if not os.path.exists(path):
+                continue
+            if not os.path.isdir(path):
+                continue
+            for root, _, files in os.walk(path):
+                for f in files:
+                    if f and f[0] == '.': continue
+                    try:
+                        st = os.stat(os.path.join(root, f))
+                    except OSError as e:
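+                        # The file may have been removed between os.walk()
+                        # and os.stat(); skip it and keep scanning.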
+                        if e.errno == os.errno.ENOENT:
+                            continue
+                        raise
+                    if most_recent_change is None:
+                        most_recent_change = st.st_mtime
+                    else:
+                        most_recent_change = max(most_recent_change,
+                                                 st.st_mtime)
+        return most_recent_change
 
-  def most_recent_change(self):
-    if time.time() - self.lastrun > 1:
-      self._cache = self._calculate()
-      self.lastrun = time.time()
-    return self._cache
-
+    def most_recent_change(self):
+        if time.time() - self.lastrun > 1:
+            self._cache = self._calculate()
+            self.lastrun = time.time()
+        return self._cache
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 1e957b6..4af00a4 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tool to get build statistics from Jenkins and upload to BigQuery."""
 
 from __future__ import print_function
@@ -27,39 +26,38 @@
 import sys
 import urllib
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
-
 _PROJECT_ID = 'grpc-testing'
 _HAS_MATRIX = True
-_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
-           'gRPC_master_linux': not _HAS_MATRIX,
-           'gRPC_master_macos': not _HAS_MATRIX,
-           'gRPC_master_windows': not _HAS_MATRIX,
-           'gRPC_performance_master': not _HAS_MATRIX,
-           'gRPC_portability_master_linux': not _HAS_MATRIX,
-           'gRPC_portability_master_windows': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
-           'gRPC_master_msan_c': not _HAS_MATRIX,
-           'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_tsan_cpp': not _HAS_MATRIX,
-           'gRPC_interop_pull_requests': not _HAS_MATRIX,
-           'gRPC_performance_pull_requests': not _HAS_MATRIX,
-           'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_portability_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_pull_requests_macos': not _HAS_MATRIX,
-           'gRPC_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
-           'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+_BUILDS = {
+    'gRPC_interop_master': not _HAS_MATRIX,
+    'gRPC_master_linux': not _HAS_MATRIX,
+    'gRPC_master_macos': not _HAS_MATRIX,
+    'gRPC_master_windows': not _HAS_MATRIX,
+    'gRPC_performance_master': not _HAS_MATRIX,
+    'gRPC_portability_master_linux': not _HAS_MATRIX,
+    'gRPC_portability_master_windows': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+    'gRPC_master_msan_c': not _HAS_MATRIX,
+    'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+    'gRPC_interop_pull_requests': not _HAS_MATRIX,
+    'gRPC_performance_pull_requests': not _HAS_MATRIX,
+    'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_portability_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_pull_requests_macos': not _HAS_MATRIX,
+    'gRPC_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+    'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
 }
 _URL_BASE = 'https://grpc-testing.appspot.com/job'
 
@@ -99,147 +97,155 @@
 
 
 def _scrape_for_known_errors(html):
-  error_list = []
-  for known_error in _KNOWN_ERRORS:
-    errors = re.findall(known_error, html)
-    this_error_count = len(errors)
-    if this_error_count > 0: 
-      error_list.append({'description': known_error,
-                         'count': this_error_count})
-      print('====> %d failures due to %s' % (this_error_count, known_error))
-  return error_list
+    error_list = []
+    for known_error in _KNOWN_ERRORS:
+        errors = re.findall(known_error, html)
+        this_error_count = len(errors)
+        if this_error_count > 0:
+            error_list.append({
+                'description': known_error,
+                'count': this_error_count
+            })
+            print('====> %d failures due to %s' %
+                  (this_error_count, known_error))
+    return error_list
 
 
 def _no_report_files_found(html):
-  return _NO_REPORT_FILES_FOUND_ERROR in html
+    return _NO_REPORT_FILES_FOUND_ERROR in html
 
 
 def _get_last_processed_buildnumber(build_name):
-  query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
-      _PROJECT_ID, _DATASET_ID, build_name)
-  query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None,
-      **query_job['jobReference']).execute(num_retries=3)
-  if page['rows'][0]['f'][0]['v']:
-    return int(page['rows'][0]['f'][0]['v'])
-  return 0
+    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
+        _PROJECT_ID, _DATASET_ID, build_name)
+    query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
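+    # max(build_number) over an empty table comes back as NULL; treat that as
+    # 0, i.e. no builds processed yet.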
+    if page['rows'][0]['f'][0]['v']:
+        return int(page['rows'][0]['f'][0]['v'])
+    return 0
 
 
 def _process_matrix(build, url_base):
-  matrix_list = []
-  for matrix in build.get_matrix_runs():
-    matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*', 
-                          matrix.name).groups()[0]
-    matrix_tuple = matrix_str.split(',')
-    json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    matrix_dict = {'name': matrix_str,
-                   'duration': matrix.get_duration().total_seconds()}
-    matrix_dict.update(_process_build(json_url, console_url))
-    matrix_list.append(matrix_dict)
+    matrix_list = []
+    for matrix in build.get_matrix_runs():
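+        # Matrix run names embed the axis values between a '\xc2\xbb' separator
+        # (UTF-8 for '»') and the trailing ' #<build number>'; extract them here.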
+        matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+                              matrix.name).groups()[0]
+        matrix_tuple = matrix_str.split(',')
+        json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        matrix_dict = {
+            'name': matrix_str,
+            'duration': matrix.get_duration().total_seconds()
+        }
+        matrix_dict.update(_process_build(json_url, console_url))
+        matrix_list.append(matrix_dict)
 
-  return matrix_list 
+    return matrix_list
 
 
 def _process_build(json_url, console_url):
-  build_result = {}
-  error_list = []
-  try:
-    html = urllib.urlopen(json_url).read()
-    test_result = json.loads(html)
-    print('====> Parsing result from %s' % json_url)
-    failure_count = test_result['failCount']
-    build_result['pass_count'] = test_result['passCount']
-    build_result['failure_count'] = failure_count
-    # This means Jenkins failure occurred.
-    build_result['no_report_files_found'] = _no_report_files_found(html)
-    # Only check errors if Jenkins failure occurred.
-    if build_result['no_report_files_found']:
-      error_list = _scrape_for_known_errors(html)
-  except Exception as e:
-    print('====> Got exception for %s: %s.' % (json_url, str(e)))   
-    print('====> Parsing errors from %s.' % console_url)
-    html = urllib.urlopen(console_url).read()
-    build_result['pass_count'] = 0  
-    build_result['failure_count'] = 1
-    # In this case, the string doesn't exist in the result html but the fact 
-    # that we fail to parse the result html indicates Jenkins failure and hence 
-    # no report files were generated.
-    build_result['no_report_files_found'] = True
-    error_list = _scrape_for_known_errors(html)
+    build_result = {}
+    error_list = []
+    try:
+        html = urllib.urlopen(json_url).read()
+        test_result = json.loads(html)
+        print('====> Parsing result from %s' % json_url)
+        failure_count = test_result['failCount']
+        build_result['pass_count'] = test_result['passCount']
+        build_result['failure_count'] = failure_count
+        # This means Jenkins failure occurred.
+        build_result['no_report_files_found'] = _no_report_files_found(html)
+        # Only check errors if Jenkins failure occurred.
+        if build_result['no_report_files_found']:
+            error_list = _scrape_for_known_errors(html)
+    except Exception as e:
+        print('====> Got exception for %s: %s.' % (json_url, str(e)))
+        print('====> Parsing errors from %s.' % console_url)
+        html = urllib.urlopen(console_url).read()
+        build_result['pass_count'] = 0
+        build_result['failure_count'] = 1
+        # In this case, the string doesn't exist in the result html but the fact
+        # that we fail to parse the result html indicates Jenkins failure and hence
+        # no report files were generated.
+        build_result['no_report_files_found'] = True
+        error_list = _scrape_for_known_errors(html)
 
-  if error_list:
-    build_result['error'] = error_list
-  elif build_result['no_report_files_found']:
-    build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
-  else:
-    build_result['error'] = [{'description': '', 'count': 0}]
+    if error_list:
+        build_result['error'] = error_list
+    elif build_result['no_report_files_found']:
+        build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+    else:
+        build_result['error'] = [{'description': '', 'count': 0}]
 
-  return build_result 
+    return build_result
 
 
 # parse command line
 argp = argparse.ArgumentParser(description='Get build statistics.')
 argp.add_argument('-u', '--username', default='jenkins')
-argp.add_argument('-b', '--builds', 
-                  choices=['all'] + sorted(_BUILDS.keys()),
-                  nargs='+',
-                  default=['all'])
+argp.add_argument(
+    '-b',
+    '--builds',
+    choices=['all'] + sorted(_BUILDS.keys()),
+    nargs='+',
+    default=['all'])
 args = argp.parse_args()
 
 J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
 bq = big_query_utils.create_big_query()
 
 for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
-  print('====> Build: %s' % build_name)
-  # Since get_last_completed_build() always fails due to malformatted string
-  # error, we use get_build_metadata() instead.
-  job = None
-  try:
-    job = J[build_name]
-  except Exception as e:
-    print('====> Failed to get build %s: %s.' % (build_name, str(e)))
-    continue
-  last_processed_build_number = _get_last_processed_buildnumber(build_name)
-  last_complete_build_number = job.get_last_completed_buildnumber()
-  # To avoid processing all builds for a project never looked at. In this case,
-  # only examine 10 latest builds.
-  starting_build_number = max(last_processed_build_number+1, 
-                              last_complete_build_number-9)
-  for build_number in xrange(starting_build_number, 
-                             last_complete_build_number+1):
-    print('====> Processing %s build %d.' % (build_name, build_number))
-    build = None
+    print('====> Build: %s' % build_name)
+    # Since get_last_completed_build() always fails with a malformed-string
+    # error, we use get_build_metadata() instead.
+    job = None
     try:
-      build = job.get_build_metadata(build_number)
-      print('====> Build status: %s.' % build.get_status())
-      if build.get_status() == 'ABORTED':
+        job = J[build_name]
+    except Exception as e:
+        print('====> Failed to get build %s: %s.' % (build_name, str(e)))
         continue
-      # If any build is still running, stop processing this job. Next time, we
-      # start from where it was left so that all builds are processed 
-      # sequentially.
-      if build.is_running():
-        print('====> Build %d is still running.' % build_number)
-        break
-    except KeyError:
-      print('====> Build %s is missing. Skip.' % build_number)
-      continue
-    build_result = {'build_number': build_number, 
-                    'timestamp': str(build.get_timestamp())}
-    url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
-    if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
-      build_result['matrix'] = _process_matrix(build, url_base)
-    else:
-      json_url = '%s/testReport/api/json' % url_base
-      console_url = '%s/consoleFull' % url_base
-      build_result['duration'] = build.get_duration().total_seconds()
-      build_stat = _process_build(json_url, console_url)
-      build_result.update(build_stat)
-    rows = [big_query_utils.make_row(build_number, build_result)]
-    if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name, 
-                                       rows):
-      print('====> Error uploading result to bigquery.')
-      sys.exit(1)
+    last_processed_build_number = _get_last_processed_buildnumber(build_name)
+    last_complete_build_number = job.get_last_completed_buildnumber()
+    # To avoid processing every build of a project we have never looked at,
+    # only examine the 10 latest builds.
+    starting_build_number = max(last_processed_build_number + 1,
+                                last_complete_build_number - 9)
+    for build_number in xrange(starting_build_number,
+                               last_complete_build_number + 1):
+        print('====> Processing %s build %d.' % (build_name, build_number))
+        build = None
+        try:
+            build = job.get_build_metadata(build_number)
+            print('====> Build status: %s.' % build.get_status())
+            if build.get_status() == 'ABORTED':
+                continue
+            # If any build is still running, stop processing this job. Next time, we
+            # start from where we left off so that all builds are processed
+            # sequentially.
+            if build.is_running():
+                print('====> Build %d is still running.' % build_number)
+                break
+        except KeyError:
+            print('====> Build %s is missing. Skip.' % build_number)
+            continue
+        build_result = {
+            'build_number': build_number,
+            'timestamp': str(build.get_timestamp())
+        }
+        url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+        if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
+            build_result['matrix'] = _process_matrix(build, url_base)
+        else:
+            json_url = '%s/testReport/api/json' % url_base
+            console_url = '%s/consoleFull' % url_base
+            build_result['duration'] = build.get_duration().total_seconds()
+            build_stat = _process_build(json_url, console_url)
+            build_result.update(build_stat)
+        rows = [big_query_utils.make_row(build_number, build_result)]
+        if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                           build_name, rows):
+            print('====> Error uploading result to bigquery.')
+            sys.exit(1)
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 8f46ea9..99f4298 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run interop (cross-language) tests in parallel."""
 
 from __future__ import print_function
@@ -37,9 +36,9 @@
 import python_utils.report_utils as report_utils
 # It's ok to not import because this is only necessary to upload results to BQ.
 try:
-  from python_utils.upload_test_results import upload_interop_results_to_bq
+    from python_utils.upload_test_results import upload_interop_results_to_bq
 except ImportError as e:
-  print(e)
+    print(e)
 
 # Docker doesn't clean up after itself, so we do it on exit.
 atexit.register(lambda: subprocess.call(['stty', 'echo']))
@@ -47,22 +46,24 @@
 ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
-_DEFAULT_SERVER_PORT=8080
+_DEFAULT_SERVER_PORT = 8080
 
-_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
-                            'client_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = [
+    'client_compressed_unary', 'client_compressed_streaming'
+]
 
-_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
-                            'server_compressed_streaming']
+_SKIP_SERVER_COMPRESSION = [
+    'server_compressed_unary', 'server_compressed_streaming'
+]
 
 _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
 
-_SKIP_ADVANCED = ['status_code_and_message',
-                  'custom_metadata',
-                  'unimplemented_method',
-                  'unimplemented_service']
+_SKIP_ADVANCED = [
+    'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+    'unimplemented_service'
+]
 
-_TEST_TIMEOUT = 3*60
+_TEST_TIMEOUT = 3 * 60
 
 # disable this test on core-based languages,
 # see https://github.com/grpc/grpc/issues/9779
@@ -77,977 +78,1054 @@
 
 class CXXLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = 'cxx'
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = 'cxx'
 
-  def client_cmd(self, args):
-    return ['bins/opt/interop_client'] + args
+    def client_cmd(self, args):
+        return ['bins/opt/interop_client'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['bins/opt/http2_client'] + args
+    def client_cmd_http2interop(self, args):
+        return ['bins/opt/http2_client'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['bins/opt/interop_server'] + args
+    def server_cmd(self, args):
+        return ['bins/opt/interop_server'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'c++'
+    def __str__(self):
+        return 'c++'
 
 
 class CSharpLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+    def client_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+    def server_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
 
 
 class CSharpCoreCLRLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+    def client_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+    def server_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'csharpcoreclr'
+    def __str__(self):
+        return 'csharpcoreclr'
 
 
 class JavaLanguage:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.server_cwd = '../grpc-java'
-    self.http2_cwd = '../grpc-java'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.server_cwd = '../grpc-java'
+        self.http2_cwd = '../grpc-java'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['./run-test-client.sh'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
+    def client_cmd_http2interop(self, args):
+        return [
+            './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['./run-test-server.sh'] + args
+    def server_cmd(self, args):
+        return ['./run-test-server.sh'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return []
+    def unimplemented_test_cases(self):
+        return []
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'java'
+    def __str__(self):
+        return 'java'
 
 
 class JavaOkHttpClient:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.safename = 'java'
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.safename = 'java'
 
-  def client_cmd(self, args):
-    return ['./run-test-client.sh', '--use_okhttp=true'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh', '--use_okhttp=true'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING
 
-  def __str__(self):
-    return 'javaokhttp'
+    def __str__(self):
+        return 'javaokhttp'
 
 
 class GoLanguage:
 
-  def __init__(self):
-    # TODO: this relies on running inside docker
-    self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
-    self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
-    self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
-    self.safename = str(self)
+    def __init__(self):
+        # TODO: this relies on running inside docker
+        self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+        self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+        self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['go', 'run', 'client.go'] + args
+    def client_cmd(self, args):
+        return ['go', 'run', 'client.go'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['go', 'run', 'negative_http2_client.go'] + args
+    def client_cmd_http2interop(self, args):
+        return ['go', 'run', 'negative_http2_client.go'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['go', 'run', 'server.go'] + args
+    def server_cmd(self, args):
+        return ['go', 'run', 'server.go'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'go'
+    def __str__(self):
+        return 'go'
+
 
 class Http2Server:
-  """Represents the HTTP/2 Interop Test server
+    """Represents the HTTP/2 Interop Test server
 
   This pretends to be a language in order to be built and run, but really it
   isn't.
   """
-  def __init__(self):
-    self.server_cwd = None
-    self.safename = str(self)
 
-  def server_cmd(self, args):
-    return ['python test/http2_test/http2_test_server.py']
+    def __init__(self):
+        self.server_cwd = None
+        self.safename = str(self)
 
-  def cloud_to_prod_env(self):
-    return {}
+    def server_cmd(self, args):
+        return ['python test/http2_test/http2_test_server.py']
 
-  def global_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def unimplemented_test_cases(self):
+        return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
 
-  def __str__(self):
-    return 'http2'
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES
+
+    def __str__(self):
+        return 'http2'
+
 
 class Http2Client:
-  """Represents the HTTP/2 Interop Test
+    """Represents the HTTP/2 Interop Test
 
   This pretends to be a language in order to be built and run, but really it
   isn't.
   """
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def cloud_to_prod_env(self):
-    return {}
+    def client_cmd(self, args):
+        return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
 
-  def global_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _TEST_CASES
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def unimplemented_test_cases(self):
+        return _TEST_CASES
 
-  def __str__(self):
-    return 'http2'
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES
+
+    def __str__(self):
+        return 'http2'
+
 
 class NodeLanguage:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-node'
-    self.server_cwd = '../grpc-node'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-node'
+        self.server_cwd = '../grpc-node'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def client_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_client.js'] + args
+            'test/interop/interop_client.js'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def server_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_server.js'] + args
+            'test/interop/interop_server.js'
+        ] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'node'
+    def __str__(self):
+        return 'node'
 
 
 class PHPLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'php'
+    def __str__(self):
+        return 'php'
 
 
 class PHP7Language:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'php7'
+    def __str__(self):
+        return 'php7'
+
 
 class ObjcLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/objective-c/tests'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/objective-c/tests'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    # from args, extract the server port and craft xcodebuild command out of it
-    for arg in args:
-      port = re.search('--server_port=(\d+)', arg)
-      if port:
-        portnum = port.group(1)
-        cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test'%portnum
-        return [cmdline]
+    def client_cmd(self, args):
+        # from args, extract the server port and craft xcodebuild command out of it
+        for arg in args:
+            port = re.search('--server_port=(\d+)', arg)
+            if port:
+                portnum = port.group(1)
+                cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+                return [cmdline]
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    # ObjC test runs all cases with the same command. It ignores the testcase
-    # cmdline argument. Here we return all but one test cases as unimplemented,
-    # and depend upon ObjC test's behavior that it runs all cases even when
-    # we tell it to run just one.
-    return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        # ObjC test runs all cases with the same command. It ignores the testcase
+        # cmdline argument. Here we return all but one test case as unimplemented,
+        # and depend upon ObjC test's behavior that it runs all cases even when
+        # we tell it to run just one.
+        return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'objc'
+    def __str__(self):
+        return 'objc'
+
 
 class RubyLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/client.rb'] + args
+    def client_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/client.rb'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/server.rb'] + args
+    def server_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/server.rb'
+        ] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
+
 
 class PythonLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--client',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def client_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+        ]
 
-  def client_cmd_http2interop(self, args):
-    return [ 'py27/bin/python',
-              'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
-           ] + args
+    def client_cmd_http2interop(self, args):
+        return [
+            'py27/bin/python',
+            'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--server',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def server_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+        ]
 
-  def global_env(self):
-    return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
-            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
+    def global_env(self):
+        return {
+            'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+        }
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
 
 
 _LANGUAGES = {
-    'c++' : CXXLanguage(),
-    'csharp' : CSharpLanguage(),
-    'csharpcoreclr' : CSharpCoreCLRLanguage(),
-    'go' : GoLanguage(),
-    'java' : JavaLanguage(),
-    'javaokhttp' : JavaOkHttpClient(),
-    'node' : NodeLanguage(),
-    'php' :  PHPLanguage(),
-    'php7' :  PHP7Language(),
-    'objc' : ObjcLanguage(),
-    'ruby' : RubyLanguage(),
-    'python' : PythonLanguage(),
+    'c++': CXXLanguage(),
+    'csharp': CSharpLanguage(),
+    'csharpcoreclr': CSharpCoreCLRLanguage(),
+    'go': GoLanguage(),
+    'java': JavaLanguage(),
+    'javaokhttp': JavaOkHttpClient(),
+    'node': NodeLanguage(),
+    'php': PHPLanguage(),
+    'php7': PHP7Language(),
+    'objc': ObjcLanguage(),
+    'ruby': RubyLanguage(),
+    'python': PythonLanguage(),
 }
 
 # languages supported as cloud_to_cloud servers
-_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
+_SERVERS = [
+    'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python'
+]
 
-_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
-               'empty_stream', 'client_streaming', 'server_streaming',
-               'cancel_after_begin', 'cancel_after_first_response',
-               'timeout_on_sleeping_server', 'custom_metadata',
-               'status_code_and_message', 'unimplemented_method',
-               'client_compressed_unary', 'server_compressed_unary',
-               'client_compressed_streaming', 'server_compressed_streaming',
-               'unimplemented_service']
+_TEST_CASES = [
+    'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+    'client_streaming', 'server_streaming', 'cancel_after_begin',
+    'cancel_after_first_response', 'timeout_on_sleeping_server',
+    'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+    'client_compressed_unary', 'server_compressed_unary',
+    'client_compressed_streaming', 'server_compressed_streaming',
+    'unimplemented_service'
+]
 
-_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
-                    'oauth2_auth_token', 'per_rpc_creds']
+_AUTH_TEST_CASES = [
+    'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token',
+    'per_rpc_creds'
+]
 
 _HTTP2_TEST_CASES = ['tls', 'framing']
 
-_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
-                               'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+_HTTP2_SERVER_TEST_CASES = [
+    'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+    'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]
 
-_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+    'data_frame_padding': 'large_unary',
+    'no_df_padding_sanity_test': 'large_unary'
+}
 
-_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
+)
 
-_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+    'java', 'go', 'python', 'c++'
+]
 
 DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
 
+
 def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
-  """Wraps given cmdline array to create 'docker run' cmdline from it."""
-  docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+    """Wraps given cmdline array to create 'docker run' cmdline from it."""
+    docker_cmdline = ['docker', 'run', '-i', '--rm=true']
 
-  # turn environ into -e docker args
-  if environ:
-    for k,v in environ.items():
-      docker_cmdline += ['-e', '%s=%s' % (k,v)]
+    # turn environ into -e docker args
+    if environ:
+        for k, v in environ.items():
+            docker_cmdline += ['-e', '%s=%s' % (k, v)]
 
-  # set working directory
-  workdir = DOCKER_WORKDIR_ROOT
-  if cwd:
-    workdir = os.path.join(workdir, cwd)
-  docker_cmdline += ['-w', workdir]
+    # set working directory
+    workdir = DOCKER_WORKDIR_ROOT
+    if cwd:
+        workdir = os.path.join(workdir, cwd)
+    docker_cmdline += ['-w', workdir]
 
-  docker_cmdline += docker_args + [image] + cmdline
-  return docker_cmdline
+    docker_cmdline += docker_args + [image] + cmdline
+    return docker_cmdline
 
 
 def manual_cmdline(docker_cmdline, docker_image):
-  """Returns docker cmdline adjusted for manual invocation."""
-  print_cmdline = []
-  for item in docker_cmdline:
-    if item.startswith('--name='):
-      continue
-    if item == docker_image:
-      item = "$docker_image"
-    item = item.replace('"', '\\"')
-    # add quotes when necessary
-    if any(character.isspace() for character in item):
-      item = "\"%s\"" % item
-    print_cmdline.append(item)
-  return ' '.join(print_cmdline)
+    """Returns docker cmdline adjusted for manual invocation."""
+    print_cmdline = []
+    for item in docker_cmdline:
+        if item.startswith('--name='):
+            continue
+        if item == docker_image:
+            item = "$docker_image"
+        item = item.replace('"', '\\"')
+        # add quotes when necessary
+        if any(character.isspace() for character in item):
+            item = "\"%s\"" % item
+        print_cmdline.append(item)
+    return ' '.join(print_cmdline)
 
 
 def write_cmdlog_maybe(cmdlog, filename):
-  """Returns docker cmdline adjusted for manual invocation."""
-  if cmdlog:
-    with open(filename, 'w') as logfile:
-      logfile.write('#!/bin/bash\n')
-      logfile.writelines("%s\n" % line for line in cmdlog)
-    print('Command log written to file %s' % filename)
+    """Returns docker cmdline adjusted for manual invocation."""
+    if cmdlog:
+        with open(filename, 'w') as logfile:
+            logfile.write('#!/bin/bash\n')
+            logfile.writelines("%s\n" % line for line in cmdlog)
+        print('Command log written to file %s' % filename)
 
 
 def bash_cmdline(cmdline):
-  """Creates bash -c cmdline from args list."""
-  # Use login shell:
-  # * makes error messages clearer if executables are missing
-  return ['bash', '-c', ' '.join(cmdline)]
+    """Creates bash -c cmdline from args list."""
+    # Use login shell:
+    # * makes error messages clearer if executables are missing
+    return ['bash', '-c', ' '.join(cmdline)]
 
 
 def auth_options(language, test_case):
-  """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+    """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
 
-  language = str(language)
-  cmdargs = []
-  env = {}
+    language = str(language)
+    cmdargs = []
+    env = {}
 
-  # TODO(jtattermusch): this file path only works inside docker
-  key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
-  oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
-  key_file_arg = '--service_account_key_file=%s' % key_filepath
-  default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
+    # TODO(jtattermusch): this file path only works inside docker
+    key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
+    oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+    key_file_arg = '--service_account_key_file=%s' % key_filepath
+    default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
 
-  if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
-    if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
-      env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
-    else:
-      cmdargs += [key_file_arg]
+    if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+        if language in [
+                'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python',
+                'ruby'
+        ]:
+            env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
+        else:
+            cmdargs += [key_file_arg]
 
-  if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
-    cmdargs += [oauth_scope_arg]
+    if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+        cmdargs += [oauth_scope_arg]
 
-  if test_case == 'oauth2_auth_token' and language == 'c++':
-    # C++ oauth2 test uses GCE creds and thus needs to know the default account
-    cmdargs += [default_account_arg]
+    if test_case == 'oauth2_auth_token' and language == 'c++':
+        # C++ oauth2 test uses GCE creds and thus needs to know the default account
+        cmdargs += [default_account_arg]
 
-  if test_case == 'compute_engine_creds':
-    cmdargs += [oauth_scope_arg, default_account_arg]
+    if test_case == 'compute_engine_creds':
+        cmdargs += [oauth_scope_arg, default_account_arg]
 
-  return (cmdargs, env)
+    return (cmdargs, env)
 
 
 def _job_kill_handler(job):
-  if job._spec.container_name:
-    dockerjob.docker_kill(job._spec.container_name)
-    # When the job times out and we decide to kill it,
-    # we need to wait a before restarting the job
-    # to prevent "container name already in use" error.
-    # TODO(jtattermusch): figure out a cleaner way to to this.
-    time.sleep(2)
+    if job._spec.container_name:
+        dockerjob.docker_kill(job._spec.container_name)
+        # When the job times out and we decide to kill it,
+        # we need to wait a while before restarting the job
+        # to prevent "container name already in use" error.
+        # TODO(jtattermusch): figure out a cleaner way to do this.
+        time.sleep(2)
 
 
-def cloud_to_prod_jobspec(language, test_case, server_host_name,
-                          server_host_detail, docker_image=None, auth=False,
+def cloud_to_prod_jobspec(language,
+                          test_case,
+                          server_host_name,
+                          server_host_detail,
+                          docker_image=None,
+                          auth=False,
                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-prod interop test"""
-  container_name = None
-  cmdargs = [
-      '--server_host=%s' % server_host_detail[0],
-      '--server_host_override=%s' % server_host_detail[1],
-      '--server_port=443',
-      '--use_tls=true',
-      '--test_case=%s' % test_case]
-  environ = dict(language.cloud_to_prod_env(), **language.global_env())
-  if auth:
-    auth_cmdargs, auth_env = auth_options(language, test_case)
-    cmdargs += auth_cmdargs
-    environ.update(auth_env)
-  cmdline = bash_cmdline(language.client_cmd(cmdargs))
-  cwd = language.client_cwd
-
-  if docker_image:
-    container_name = dockerjob.random_name('interop_client_%s' %
-                                           language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 cwd=cwd,
-                                 environ=environ,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
-    environ = None
-
-  suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
-                                     test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
-
-
-def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
-                           server_port, docker_image=None, insecure=False,
-                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-cloud interop test"""
-  interop_only_options = [
-      '--server_host_override=foo.test.google.fr',
-      '--use_tls=%s' % ('false' if insecure else 'true'),
-      '--use_test_ca=true',
-  ]
-
-  client_test_case = test_case
-  if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-    client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
-  if client_test_case in language.unimplemented_test_cases():
-    print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
-    sys.exit(1)
-
-  common_options = [
-      '--test_case=%s' % client_test_case,
-      '--server_host=%s' % server_host,
-      '--server_port=%s' % server_port,
-  ]
-
-  if test_case in _HTTP2_SERVER_TEST_CASES:
-    if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-      client_options = interop_only_options + common_options
-      cmdline = bash_cmdline(language.client_cmd(client_options))
-      cwd = language.client_cwd
-    else:
-      cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
-      cwd = language.http2_cwd
-  else:
-    cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
+    """Creates jobspec for cloud-to-prod interop test"""
+    container_name = None
+    cmdargs = [
+        '--server_host=%s' % server_host_detail[0],
+        '--server_host_override=%s' % server_host_detail[1],
+        '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+    ]
+    environ = dict(language.cloud_to_prod_env(), **language.global_env())
+    if auth:
+        auth_cmdargs, auth_env = auth_options(language, test_case)
+        cmdargs += auth_cmdargs
+        environ.update(auth_env)
+    cmdline = bash_cmdline(language.client_cmd(cmdargs))
     cwd = language.client_cwd
 
-  environ = language.global_env()
-  if docker_image and language.safename != 'objc':
-    # we can't run client in docker for objc.
-    container_name = dockerjob.random_name('interop_client_%s' % language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 environ=environ,
-                                 cwd=cwd,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
+    if docker_image:
+        container_name = dockerjob.random_name('interop_client_%s' %
+                                               language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            cwd=cwd,
+            environ=environ,
+            docker_args=['--net=host', '--name=%s' % container_name])
+        if manual_cmd_log is not None:
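+            # an empty log means nothing has been recorded yet, so emit the docker image header first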
+            if manual_cmd_log == []:
+                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                      docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+        environ = None
 
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
-                                                        test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
+    suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
+                                   test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+                           test_case,
+                           server_name,
+                           server_host,
+                           server_port,
+                           docker_image=None,
+                           insecure=False,
+                           manual_cmd_log=None):
+    """Creates jobspec for cloud-to-cloud interop test"""
+    interop_only_options = [
+        '--server_host_override=foo.test.google.fr',
+        '--use_tls=%s' % ('false' if insecure else 'true'),
+        '--use_test_ca=true',
+    ]
+
+    client_test_case = test_case
+    if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+        client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+            test_case]
+    if client_test_case in language.unimplemented_test_cases():
+        print('asking client %s to run unimplemented test case %s' %
+              (repr(language), client_test_case))
+        sys.exit(1)
+
+    common_options = [
+        '--test_case=%s' % client_test_case,
+        '--server_host=%s' % server_host,
+        '--server_port=%s' % server_port,
+    ]
+
+    if test_case in _HTTP2_SERVER_TEST_CASES:
+        if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+            client_options = interop_only_options + common_options
+            cmdline = bash_cmdline(language.client_cmd(client_options))
+            cwd = language.client_cwd
+        else:
+            cmdline = bash_cmdline(
+                language.client_cmd_http2interop(common_options))
+            cwd = language.http2_cwd
+    else:
+        cmdline = bash_cmdline(
+            language.client_cmd(common_options + interop_only_options))
+        cwd = language.client_cwd
+
+    environ = language.global_env()
+    if docker_image and language.safename != 'objc':
+        # we can't run client in docker for objc.
+        container_name = dockerjob.random_name('interop_client_%s' %
+                                               language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            environ=environ,
+            cwd=cwd,
+            docker_args=['--net=host', '--name=%s' % container_name])
+        if manual_cmd_log is not None:
+            if manual_cmd_log == []:
+                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                      docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
+                                                      test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job
 
 
 def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
-  """Create jobspec for running a server"""
-  container_name = dockerjob.random_name('interop_server_%s' % language.safename)
-  cmdline = bash_cmdline(
-      language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
-                           '--use_tls=%s' % ('false' if insecure else 'true')]))
-  environ = language.global_env()
-  docker_args = ['--name=%s' % container_name]
-  if language.safename == 'http2':
-    # we are running the http2 interop server. Open next N ports beginning
-    # with the server port. These ports are used for http2 interop test
-    # (one test case per port).
-    docker_args += list(
-        itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
-                                      for i in range(
-                                          len(_HTTP2_SERVER_TEST_CASES))))
-    # Enable docker's healthcheck mechanism.
-    # This runs a Python script inside the container every second. The script
-    # pings the http2 server to verify it is ready. The 'health-retries' flag
-    # specifies the number of consecutive failures before docker will report
-    # the container's status as 'unhealthy'. Prior to the first 'health_retries'
-    # failures or the first success, the status will be 'starting'. 'docker ps'
-    # or 'docker inspect' can be used to see the health of the container on the
-    # command line.
-    docker_args += [
-        '--health-cmd=python test/http2_test/http2_server_health_check.py '
-        '--server_host=%s --server_port=%d'
-        % ('localhost', _DEFAULT_SERVER_PORT),
-        '--health-interval=1s',
-        '--health-retries=5',
-        '--health-timeout=10s',
-    ]
+    """Create jobspec for running a server"""
+    container_name = dockerjob.random_name('interop_server_%s' %
+                                           language.safename)
+    cmdline = bash_cmdline(
+        language.server_cmd([
+            '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
+                'false' if insecure else 'true')
+        ]))
+    environ = language.global_env()
+    docker_args = ['--name=%s' % container_name]
+    if language.safename == 'http2':
+        # we are running the http2 interop server. Open the next N ports
+        # beginning with the server port. These ports are used for the http2
+        # interop tests (one test case per port).
+        docker_args += list(
+            itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
+                                          for i in range(
+                                              len(_HTTP2_SERVER_TEST_CASES))))
+        # Enable docker's healthcheck mechanism.
+        # This runs a Python script inside the container every second. The script
+        # pings the http2 server to verify it is ready. The 'health-retries' flag
+        # specifies the number of consecutive failures before docker will report
+        # the container's status as 'unhealthy'. Prior to the first 'health_retries'
+        # failures or the first success, the status will be 'starting'. 'docker ps'
+        # or 'docker inspect' can be used to see the health of the container on the
+        # command line.
+        docker_args += [
+            '--health-cmd=python test/http2_test/http2_server_health_check.py '
+            '--server_host=%s --server_port=%d' %
+            ('localhost', _DEFAULT_SERVER_PORT),
+            '--health-interval=1s',
+            '--health-retries=5',
+            '--health-timeout=10s',
+        ]
 
-  else:
-    docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+    else:
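+        # plain interop server: publish only the default server port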
+        docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
 
-  docker_cmdline = docker_run_cmdline(cmdline,
-                                      image=docker_image,
-                                      cwd=language.server_cwd,
-                                      environ=environ,
-                                      docker_args=docker_args)
-  if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
-  server_job = jobset.JobSpec(
-          cmdline=docker_cmdline,
-          environ=environ,
-          shortname='interop_server_%s' % language,
-          timeout_seconds=30*60)
-  server_job.container_name = container_name
-  return server_job
+    docker_cmdline = docker_run_cmdline(
+        cmdline,
+        image=docker_image,
+        cwd=language.server_cwd,
+        environ=environ,
+        docker_args=docker_args)
+    if manual_cmd_log is not None:
+        if manual_cmd_log == []:
+            manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                  docker_image)
+        manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+    server_job = jobset.JobSpec(
+        cmdline=docker_cmdline,
+        environ=environ,
+        shortname='interop_server_%s' % language,
+        timeout_seconds=30 * 60)
+    server_job.container_name = container_name
+    return server_job
 
 
 def build_interop_image_jobspec(language, tag=None):
-  """Creates jobspec for building interop docker image for a language"""
-  if not tag:
-    tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
-  env = {'INTEROP_IMAGE': tag,
-         'BASE_NAME': 'grpc_interop_%s' % language.safename}
-  if not args.travis:
-    env['TTY_FLAG'] = '-t'
-  # This env variable is used to get around the github rate limit
-  # error when running the PHP `composer install` command
-  host_file = '%s/.composer/auth.json' % os.environ['HOME']
-  if language.safename == 'php' and os.path.exists(host_file):
-    env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
-      '-v %s:/root/.composer/auth.json:ro' % host_file
-  build_job = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
-          environ=env,
-          shortname='build_docker_%s' % (language),
-          timeout_seconds=30*60)
-  build_job.tag = tag
-  return build_job
+    """Creates jobspec for building interop docker image for a language"""
+    if not tag:
+        tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+    env = {
+        'INTEROP_IMAGE': tag,
+        'BASE_NAME': 'grpc_interop_%s' % language.safename
+    }
+    if not args.travis:
+        env['TTY_FLAG'] = '-t'
+    # This env variable is used to get around the github rate limit
+    # error when running the PHP `composer install` command
+    host_file = '%s/.composer/auth.json' % os.environ['HOME']
+    if language.safename == 'php' and os.path.exists(host_file):
+        env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+          '-v %s:/root/.composer/auth.json:ro' % host_file
+    build_job = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+        environ=env,
+        shortname='build_docker_%s' % (language),
+        timeout_seconds=30 * 60)
+    build_job.tag = tag
+    return build_job
 
 
 def aggregate_http2_results(stdout):
-  match = re.search(r'\{"cases[^\]]*\]\}', stdout)
-  if not match:
-    return None
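+    """Parses the JSON summary emitted by the http2 interop client into pass/fail/skip counts."""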
+    match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+    if not match:
+        return None
 
-  results = json.loads(match.group(0))
-  skipped = 0
-  passed = 0
-  failed = 0
-  failed_cases = []
-  for case in results['cases']:
-    if case.get('skipped', False):
-      skipped += 1
-    else:
-      if case.get('passed', False):
-        passed += 1
-      else:
-        failed += 1
-        failed_cases.append(case.get('name', "NONAME"))
-  return {
-    'passed': passed,
-    'failed': failed,
-    'skipped': skipped,
-    'failed_cases': ', '.join(failed_cases),
-    'percent': 1.0 * passed / (passed + failed)
-  }
+    results = json.loads(match.group(0))
+    skipped = 0
+    passed = 0
+    failed = 0
+    failed_cases = []
+    for case in results['cases']:
+        if case.get('skipped', False):
+            skipped += 1
+        else:
+            if case.get('passed', False):
+                passed += 1
+            else:
+                failed += 1
+                failed_cases.append(case.get('name', "NONAME"))
+    return {
+        'passed': passed,
+        'failed': failed,
+        'skipped': skipped,
+        'failed_cases': ', '.join(failed_cases),
+        'percent': 1.0 * passed / (passed + failed)
+    }
+
 
 # A dictionary of prod servers to test.
 # Format: server_name: (server_host, server_host_override, errors_allowed)
 # TODO(adelez): implement logic for errors_allowed where if the indicated tests
 # fail, they don't impact the overall test result.
 prod_servers = {
-    'default': ('216.239.32.254',
-                'grpc-test.sandbox.googleapis.com', False),
-    'gateway_v2': ('216.239.32.254',
-                   'grpc-test2.sandbox.googleapis.com', True),
+    'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False),
+    'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True),
     'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
                       False),
     'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
                          True),
-    'gateway_v4': ('216.239.32.254',
-                   'grpc-test4.sandbox.googleapis.com', True),
+    'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True),
     'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
                          True),
 }
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Clients to run. Objc client can be only run on OSX.')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Clients to run. Objc client can only be run on OSX.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--cloud_to_prod',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod tests.')
-argp.add_argument('--cloud_to_prod_auth',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod_auth tests.')
-argp.add_argument('--prod_servers',
-                  choices=prod_servers.keys(),
-                  default=['default'],
-                  nargs='+',
-                  help=('The servers to run cloud_to_prod and '
-                        'cloud_to_prod_auth tests against.'))
-argp.add_argument('-s', '--server',
-                  choices=['all'] + sorted(_SERVERS),
-                  nargs='+',
-                  help='Run cloud_to_cloud servers in a separate docker ' +
-                       'image. Servers can only be started automatically if ' +
-                       '--use_docker option is enabled.',
-                  default=[])
-argp.add_argument('--override_server',
-                  action='append',
-                  type=lambda kv: kv.split('='),
-                  help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
-                  default=[])
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-v', '--verbose',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--use_docker',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run all the interop tests under docker. That provides ' +
-                  'additional isolation and prevents the need to install ' +
-                  'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--manual_run',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Prepare things for running interop tests manually. ' +
-                  'Preserve docker images after building them and skip '
-                  'actually running the tests. Only print commands to run by ' +
-                  'hand.')
-argp.add_argument('--http2_interop',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_server_interop',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests')
-argp.add_argument('--insecure',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Whether to use secure channel.')
-argp.add_argument('--internal_ci',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help=('Put reports into subdirectories to improve '
-                        'presentation of results by Internal CI.'))
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--cloud_to_prod',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run cloud_to_prod tests.')
+argp.add_argument(
+    '--cloud_to_prod_auth',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run cloud_to_prod_auth tests.')
+argp.add_argument(
+    '--prod_servers',
+    choices=prod_servers.keys(),
+    default=['default'],
+    nargs='+',
+    help=('The servers to run cloud_to_prod and '
+          'cloud_to_prod_auth tests against.'))
+argp.add_argument(
+    '-s',
+    '--server',
+    choices=['all'] + sorted(_SERVERS),
+    nargs='+',
+    help='Run cloud_to_cloud servers in a separate docker ' +
+    'image. Servers can only be started automatically if ' +
+    '--use_docker option is enabled.',
+    default=[])
+argp.add_argument(
+    '--override_server',
+    action='append',
+    type=lambda kv: kv.split('='),
+    help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+    default=[])
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-v', '--verbose', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--use_docker',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run all the interop tests under docker. That provides ' +
+    'additional isolation and prevents the need to install ' +
+    'language specific prerequisites. Only available on Linux.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+    '--manual_run',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Prepare things for running interop tests manually. ' +
+    'Preserve docker images after building them and skip '
+    'actually running the tests. Only print commands to run by ' + 'hand.')
+argp.add_argument(
+    '--http2_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+    '--http2_server_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument(
+    '--insecure',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Whether to use an insecure channel (TLS disabled).')
+argp.add_argument(
+    '--internal_ci',
+    default=False,
+    action='store_const',
+    const=True,
+    help=('Put reports into subdirectories to improve '
+          'presentation of results by Internal CI.'))
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
 args = argp.parse_args()
 
-servers = set(s for s in itertools.chain.from_iterable(_SERVERS
-                                                       if x == 'all' else [x]
-                                                       for x in args.server))
+servers = set(
+    s
+    for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
+                                           for x in args.server))
 
 if args.use_docker:
-  if not args.travis:
-    print('Seen --use_docker flag, will run interop tests under docker.')
-    print('')
-    print('IMPORTANT: The changes you are testing need to be locally committed')
-    print('because only the committed changes in the current branch will be')
-    print('copied to the docker environment.')
-    time.sleep(5)
+    if not args.travis:
+        print('Seen --use_docker flag, will run interop tests under docker.')
+        print('')
+        print(
+            'IMPORTANT: The changes you are testing need to be locally committed'
+        )
+        print(
+            'because only the committed changes in the current branch will be')
+        print('copied to the docker environment.')
+        time.sleep(5)
 
 if args.manual_run and not args.use_docker:
-  print('--manual_run is only supported with --use_docker option enabled.')
-  sys.exit(1)
+    print('--manual_run is only supported with --use_docker option enabled.')
+    sys.exit(1)
 
 if not args.use_docker and servers:
-  print('Running interop servers is only supported with --use_docker option enabled.')
-  sys.exit(1)
-
+    print(
+        'Running interop servers is only supported with --use_docker option enabled.'
+    )
+    sys.exit(1)
 
 # we want to include everything but objc in 'all'
 # because objc won't run on non-mac platforms
 all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
-languages = set(_LANGUAGES[l]
-                for l in itertools.chain.from_iterable(
-                    all_but_objc if x == 'all' else [x]
-                    for x in args.language))
+languages = set(
+    _LANGUAGES[l]
+    for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x]
+                                           for x in args.language))
 
 languages_http2_clients_for_http2_server_interop = set()
 if args.http2_server_interop:
-  languages_http2_clients_for_http2_server_interop = set(
-      _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
-      if 'all' in args.language or l in args.language)
+    languages_http2_clients_for_http2_server_interop = set(
+        _LANGUAGES[l]
+        for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
+        if 'all' in args.language or l in args.language)
 
 http2Interop = Http2Client() if args.http2_interop else None
 http2InteropServer = Http2Server() if args.http2_server_interop else None
 
-docker_images={}
+docker_images = {}
 if args.use_docker:
-  # languages for which to build docker images
-  languages_to_build = set(
-      _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
-  languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
+    # languages for which to build docker images
+    languages_to_build = set(
+        _LANGUAGES[k]
+        for k in set([str(l) for l in languages] + [s for s in servers]))
+    languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
 
-  if args.http2_interop:
-    languages_to_build.add(http2Interop)
+    if args.http2_interop:
+        languages_to_build.add(http2Interop)
 
-  if args.http2_server_interop:
-    languages_to_build.add(http2InteropServer)
+    if args.http2_server_interop:
+        languages_to_build.add(http2InteropServer)
 
-  build_jobs = []
-  for l in languages_to_build:
-    if str(l) == 'objc':
-      # we don't need to build a docker image for objc
-      continue
-    job = build_interop_image_jobspec(l)
-    docker_images[str(l)] = job.tag
-    build_jobs.append(job)
+    build_jobs = []
+    for l in languages_to_build:
+        if str(l) == 'objc':
+            # we don't need to build a docker image for objc
+            continue
+        job = build_interop_image_jobspec(l)
+        docker_images[str(l)] = job.tag
+        build_jobs.append(job)
 
-  if build_jobs:
-    jobset.message('START', 'Building interop docker images.', do_newline=True)
-    if args.verbose:
-      print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+    if build_jobs:
+        jobset.message(
+            'START', 'Building interop docker images.', do_newline=True)
+        if args.verbose:
+            print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
 
-    num_failures, _ = jobset.run(
-        build_jobs, newline_on_success=True, maxjobs=args.jobs)
-    if num_failures == 0:
-      jobset.message('SUCCESS', 'All docker images built successfully.',
-                     do_newline=True)
-    else:
-      jobset.message('FAILED', 'Failed to build interop docker images.',
-                     do_newline=True)
-      for image in six.itervalues(docker_images):
-        dockerjob.remove_image(image, skip_nonexistent=True)
-      sys.exit(1)
+        num_failures, _ = jobset.run(
+            build_jobs, newline_on_success=True, maxjobs=args.jobs)
+        if num_failures == 0:
+            jobset.message(
+                'SUCCESS',
+                'All docker images built successfully.',
+                do_newline=True)
+        else:
+            jobset.message(
+                'FAILED',
+                'Failed to build interop docker images.',
+                do_newline=True)
+            for image in six.itervalues(docker_images):
+                dockerjob.remove_image(image, skip_nonexistent=True)
+            sys.exit(1)
 
 server_manual_cmd_log = [] if args.manual_run else None
 client_manual_cmd_log = [] if args.manual_run else None
@@ -1056,214 +1134,236 @@
 server_jobs = {}
 server_addresses = {}
 try:
-  for s in servers:
-    lang = str(s)
-    spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang),
-                          args.insecure, manual_cmd_log=server_manual_cmd_log)
-    if not args.manual_run:
-      job = dockerjob.DockerJob(spec)
-      server_jobs[lang] = job
-      server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
-    else:
-      # don't run the server, set server port to a placeholder value
-      server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
-  http2_server_job = None
-  if args.http2_server_interop:
-    # launch a HTTP2 server emulator that creates edge cases
-    lang = str(http2InteropServer)
-    spec = server_jobspec(http2InteropServer, docker_images.get(lang),
-                          manual_cmd_log=server_manual_cmd_log)
-    if not args.manual_run:
-      http2_server_job = dockerjob.DockerJob(spec)
-      server_jobs[lang] = http2_server_job
-    else:
-      # don't run the server, set server port to a placeholder value
-      server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
-  jobs = []
-  if args.cloud_to_prod:
-    if args.insecure:
-      print('TLS is always enabled for cloud_to_prod scenarios.')
-    for server_host_name in args.prod_servers:
-      for language in languages:
-        for test_case in _TEST_CASES:
-          if not test_case in language.unimplemented_test_cases():
-            if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
-              test_job = cloud_to_prod_jobspec(
-                  language, test_case, server_host_name,
-                  prod_servers[server_host_name],
-                  docker_image=docker_images.get(str(language)),
-                  manual_cmd_log=client_manual_cmd_log)
-              jobs.append(test_job)
-
-      if args.http2_interop:
-        for test_case in _HTTP2_TEST_CASES:
-          test_job = cloud_to_prod_jobspec(
-              http2Interop, test_case, server_host_name,
-              prod_servers[server_host_name],
-              docker_image=docker_images.get(str(http2Interop)),
-              manual_cmd_log=client_manual_cmd_log)
-          jobs.append(test_job)
-
-  if args.cloud_to_prod_auth:
-    if args.insecure:
-      print('TLS is always enabled for cloud_to_prod scenarios.')
-    for server_host_name in args.prod_servers:
-      for language in languages:
-        for test_case in _AUTH_TEST_CASES:
-          if not test_case in language.unimplemented_test_cases():
-            test_job = cloud_to_prod_jobspec(
-                language, test_case, server_host_name,
-                prod_servers[server_host_name],
-                docker_image=docker_images.get(str(language)), auth=True,
-                manual_cmd_log=client_manual_cmd_log)
-            jobs.append(test_job)
-
-  for server in args.override_server:
-    server_name = server[0]
-    (server_host, server_port) = server[1].split(':')
-    server_addresses[server_name] = (server_host, server_port)
-
-  for server_name, server_address in server_addresses.items():
-    (server_host, server_port) = server_address
-    server_language = _LANGUAGES.get(server_name, None)
-    skip_server = []  # test cases unimplemented by server
-    if server_language:
-      skip_server = server_language.unimplemented_test_cases_server()
-    for language in languages:
-      for test_case in _TEST_CASES:
-        if not test_case in language.unimplemented_test_cases():
-          if not test_case in skip_server:
-            test_job = cloud_to_cloud_jobspec(language,
-                                              test_case,
-                                              server_name,
-                                              server_host,
-                                              server_port,
-                                              docker_image=docker_images.get(str(language)),
-                                              insecure=args.insecure,
-                                              manual_cmd_log=client_manual_cmd_log)
-            jobs.append(test_job)
-
-    if args.http2_interop:
-      for test_case in _HTTP2_TEST_CASES:
-        if server_name == "go":
-          # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
-          continue
-        test_job = cloud_to_cloud_jobspec(http2Interop,
-                                          test_case,
-                                          server_name,
-                                          server_host,
-                                          server_port,
-                                          docker_image=docker_images.get(str(http2Interop)),
-                                          insecure=args.insecure,
-                                          manual_cmd_log=client_manual_cmd_log)
-        jobs.append(test_job)
-
-  if args.http2_server_interop:
-    if not args.manual_run:
-      http2_server_job.wait_for_healthy(timeout_seconds=600)
-    for language in languages_http2_clients_for_http2_server_interop:
-      for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
-        offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
-        server_port = _DEFAULT_SERVER_PORT+offset
+    for s in servers:
+        lang = str(s)
+        spec = server_jobspec(
+            _LANGUAGES[lang],
+            docker_images.get(lang),
+            args.insecure,
+            manual_cmd_log=server_manual_cmd_log)
         if not args.manual_run:
-          server_port = http2_server_job.mapped_port(server_port)
-        test_job = cloud_to_cloud_jobspec(language,
-                                          test_case,
-                                          str(http2InteropServer),
-                                          'localhost',
-                                          server_port,
-                                          docker_image=docker_images.get(str(language)),
-                                          manual_cmd_log=client_manual_cmd_log)
-        jobs.append(test_job)
-    for language in languages:
-      # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
-      # HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
-      # than specialized http2 clients, reusing existing test implementations.
-      # For example, in the "data_frame_padding" test, use language's gRPC
-      # interop clients and make them think that theyre running "large_unary"
-      # test case. This avoids implementing a new test case in each language.
-      for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-        if test_case not in language.unimplemented_test_cases():
-          offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
-          server_port = _DEFAULT_SERVER_PORT+offset
-          if not args.manual_run:
-            server_port = http2_server_job.mapped_port(server_port)
-          if not args.insecure:
-            print(('Creating grpc cient to http2 server test case with insecure connection, even though'
-                   ' args.insecure is False. Http2 test server only supports insecure connections.'))
-          test_job = cloud_to_cloud_jobspec(language,
-                                            test_case,
-                                            str(http2InteropServer),
-                                            'localhost',
-                                            server_port,
-                                            docker_image=docker_images.get(str(language)),
-                                            insecure=True,
-                                            manual_cmd_log=client_manual_cmd_log)
-          jobs.append(test_job)
+            job = dockerjob.DockerJob(spec)
+            server_jobs[lang] = job
+            server_addresses[lang] = ('localhost',
+                                      job.mapped_port(_DEFAULT_SERVER_PORT))
+        else:
+            # don't run the server, set server port to a placeholder value
+            server_addresses[lang] = ('localhost', '${SERVER_PORT}')
 
-  if not jobs:
-    print('No jobs to run.')
-    for image in six.itervalues(docker_images):
-      dockerjob.remove_image(image, skip_nonexistent=True)
-    sys.exit(1)
+    http2_server_job = None
+    if args.http2_server_interop:
+        # launch an HTTP2 server emulator that creates edge cases
+        lang = str(http2InteropServer)
+        spec = server_jobspec(
+            http2InteropServer,
+            docker_images.get(lang),
+            manual_cmd_log=server_manual_cmd_log)
+        if not args.manual_run:
+            http2_server_job = dockerjob.DockerJob(spec)
+            server_jobs[lang] = http2_server_job
+        else:
+            # don't run the server, set server port to a placeholder value
+            server_addresses[lang] = ('localhost', '${SERVER_PORT}')
 
-  if args.manual_run:
-    print('All tests will skipped --manual_run option is active.')
+    jobs = []
+    if args.cloud_to_prod:
+        if args.insecure:
+            print('TLS is always enabled for cloud_to_prod scenarios.')
+        for server_host_name in args.prod_servers:
+            for language in languages:
+                for test_case in _TEST_CASES:
+                    if not test_case in language.unimplemented_test_cases():
+                        if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
+                            test_job = cloud_to_prod_jobspec(
+                                language,
+                                test_case,
+                                server_host_name,
+                                prod_servers[server_host_name],
+                                docker_image=docker_images.get(str(language)),
+                                manual_cmd_log=client_manual_cmd_log)
+                            jobs.append(test_job)
 
-  if args.verbose:
-    print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+            if args.http2_interop:
+                for test_case in _HTTP2_TEST_CASES:
+                    test_job = cloud_to_prod_jobspec(
+                        http2Interop,
+                        test_case,
+                        server_host_name,
+                        prod_servers[server_host_name],
+                        docker_image=docker_images.get(str(http2Interop)),
+                        manual_cmd_log=client_manual_cmd_log)
+                    jobs.append(test_job)
 
-  num_failures, resultset = jobset.run(jobs, newline_on_success=True,
-                                       maxjobs=args.jobs,
-                                       skip_jobs=args.manual_run)
-  if args.bq_result_table and resultset:
-    upload_interop_results_to_bq(resultset, args.bq_result_table, args)
-  if num_failures:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  else:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+    if args.cloud_to_prod_auth:
+        if args.insecure:
+            print('TLS is always enabled for cloud_to_prod scenarios.')
+        for server_host_name in args.prod_servers:
+            for language in languages:
+                for test_case in _AUTH_TEST_CASES:
+                    if not test_case in language.unimplemented_test_cases():
+                        test_job = cloud_to_prod_jobspec(
+                            language,
+                            test_case,
+                            server_host_name,
+                            prod_servers[server_host_name],
+                            docker_image=docker_images.get(str(language)),
+                            auth=True,
+                            manual_cmd_log=client_manual_cmd_log)
+                        jobs.append(test_job)
 
-  write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
-  write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+    for server in args.override_server:
+        server_name = server[0]
+        (server_host, server_port) = server[1].split(':')
+        server_addresses[server_name] = (server_host, server_port)
 
-  xml_report_name = _XML_REPORT
-  if args.internal_ci:
-    xml_report_name = _INTERNAL_CL_XML_REPORT
-  report_utils.render_junit_xml_report(resultset, xml_report_name)
+    for server_name, server_address in server_addresses.items():
+        (server_host, server_port) = server_address
+        server_language = _LANGUAGES.get(server_name, None)
+        skip_server = []  # test cases unimplemented by server
+        if server_language:
+            skip_server = server_language.unimplemented_test_cases_server()
+        for language in languages:
+            for test_case in _TEST_CASES:
+                if not test_case in language.unimplemented_test_cases():
+                    if not test_case in skip_server:
+                        test_job = cloud_to_cloud_jobspec(
+                            language,
+                            test_case,
+                            server_name,
+                            server_host,
+                            server_port,
+                            docker_image=docker_images.get(str(language)),
+                            insecure=args.insecure,
+                            manual_cmd_log=client_manual_cmd_log)
+                        jobs.append(test_job)
 
-  for name, job in resultset.items():
-    if "http2" in name:
-      job[0].http2results = aggregate_http2_results(job[0].message)
+        if args.http2_interop:
+            for test_case in _HTTP2_TEST_CASES:
+                if server_name == "go":
+                    # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+                    continue
+                test_job = cloud_to_cloud_jobspec(
+                    http2Interop,
+                    test_case,
+                    server_name,
+                    server_host,
+                    server_port,
+                    docker_image=docker_images.get(str(http2Interop)),
+                    insecure=args.insecure,
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
 
-  http2_server_test_cases = (
-      _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
+    if args.http2_server_interop:
+        if not args.manual_run:
+            http2_server_job.wait_for_healthy(timeout_seconds=600)
+        for language in languages_http2_clients_for_http2_server_interop:
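+            # each http2 server test case is served on its own port: the base
+            # port plus the test case's index in sorted order (see server_jobspec)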
+            for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+                    _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+                offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                server_port = _DEFAULT_SERVER_PORT + offset
+                if not args.manual_run:
+                    server_port = http2_server_job.mapped_port(server_port)
+                test_job = cloud_to_cloud_jobspec(
+                    language,
+                    test_case,
+                    str(http2InteropServer),
+                    'localhost',
+                    server_port,
+                    docker_image=docker_images.get(str(language)),
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
+        for language in languages:
+            # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+            # HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+            # than specialized http2 clients, reusing existing test implementations.
+            # For example, in the "data_frame_padding" test, use the language's gRPC
+            # interop clients and make them think that they're running the "large_unary"
+            # test case. This avoids implementing a new test case in each language.
+            for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+                if test_case not in language.unimplemented_test_cases():
+                    offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                    server_port = _DEFAULT_SERVER_PORT + offset
+                    if not args.manual_run:
+                        server_port = http2_server_job.mapped_port(server_port)
+                    if not args.insecure:
+                        print((
+                            'Creating grpc client to http2 server test case with insecure connection, even though'
+                            ' args.insecure is False. Http2 test server only supports insecure connections.'
+                        ))
+                    test_job = cloud_to_cloud_jobspec(
+                        language,
+                        test_case,
+                        str(http2InteropServer),
+                        'localhost',
+                        server_port,
+                        docker_image=docker_images.get(str(language)),
+                        insecure=True,
+                        manual_cmd_log=client_manual_cmd_log)
+                    jobs.append(test_job)
 
-  report_utils.render_interop_html_report(
-      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
-      _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
-      args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
-      args.http2_interop)
+    if not jobs:
+        print('No jobs to run.')
+        for image in six.itervalues(docker_images):
+            dockerjob.remove_image(image, skip_nonexistent=True)
+        sys.exit(1)
 
-  if num_failures:
-    sys.exit(1)
-  else:
-    sys.exit(0)
-except Exception as e:
-  print('exception occurred:')
-  traceback.print_exc(file=sys.stdout)
-finally:
-  # Check if servers are still running.
-  for server, job in server_jobs.items():
-    if not job.is_running():
-      print('Server "%s" has exited prematurely.' % server)
+    if args.manual_run:
+        print('All tests will be skipped because --manual_run option is active.')
 
-  dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+    if args.verbose:
+        print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
 
-  for image in six.itervalues(docker_images):
-    if not args.manual_run:
-      print('Removing docker image %s' % image)
-      dockerjob.remove_image(image)
+    num_failures, resultset = jobset.run(
+        jobs,
+        newline_on_success=True,
+        maxjobs=args.jobs,
+        skip_jobs=args.manual_run)
+    if args.bq_result_table and resultset:
+        upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+    if num_failures:
+        jobset.message('FAILED', 'Some tests failed', do_newline=True)
     else:
-      print('Preserving docker image: %s' % image)
+        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+
+    write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+    write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+
+    xml_report_name = _XML_REPORT
+    if args.internal_ci:
+        xml_report_name = _INTERNAL_CL_XML_REPORT
+    report_utils.render_junit_xml_report(resultset, xml_report_name)
+
+    for name, job in resultset.items():
+        if "http2" in name:
+            job[0].http2results = aggregate_http2_results(job[0].message)
+
+    http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+                               if args.http2_server_interop else [])
+
+    report_utils.render_interop_html_report(
+        set([str(l) for l in languages]), servers, _TEST_CASES,
+        _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset,
+        num_failures, args.cloud_to_prod_auth or args.cloud_to_prod,
+        args.prod_servers, args.http2_interop)
+
+    if num_failures:
+        sys.exit(1)
+    else:
+        sys.exit(0)
+except Exception as e:
+    print('exception occurred:')
+    traceback.print_exc(file=sys.stdout)
+finally:
+    # Check if servers are still running.
+    for server, job in server_jobs.items():
+        if not job.is_running():
+            print('Server "%s" has exited prematurely.' % server)
+
+    dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+
+    for image in six.itervalues(docker_images):
+        if not args.manual_run:
+            print('Removing docker image %s' % image)
+            dockerjob.remove_image(image)
+        else:
+            print('Preserving docker image: %s' % image)
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index c136af5..561217c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -23,26 +23,31 @@
 import python_utils.jobset as jobset
 import python_utils.start_port_server as start_port_server
 
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', 'bm_diff'))
+sys.path.append(
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
+        'bm_diff'))
 import bm_constants
 
 flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
 
 os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 if not os.path.exists('reports'):
-  os.makedirs('reports')
+    os.makedirs('reports')
 
 start_port_server.start_port_server()
 
+
 def fnize(s):
-  out = ''
-  for c in s:
-    if c in '<>, /':
-      if len(out) and out[-1] == '_': continue
-      out += '_'
-    else:
-      out += c
-  return out
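+    """Turns a benchmark name into a safe file name by collapsing '<>, /' characters into underscores."""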
+    out = ''
+    for c in s:
+        if c in '<>, /':
+            if len(out) and out[-1] == '_': continue
+            out += '_'
+        else:
+            out += c
+    return out
+
 
 # index html
 index_html = """
@@ -53,169 +58,202 @@
 <body>
 """
 
+
 def heading(name):
-  global index_html
-  index_html += "<h1>%s</h1>\n" % name
+    global index_html
+    index_html += "<h1>%s</h1>\n" % name
+
 
 def link(txt, tgt):
-  global index_html
-  index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
-      cgi.escape(tgt, quote=True), cgi.escape(txt))
+    global index_html
+    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
+        cgi.escape(tgt, quote=True), cgi.escape(txt))
+
 
 def text(txt):
-  global index_html
-  index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+    global index_html
+    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+
 
 def collect_latency(bm_name, args):
-  """generate latency profiles"""
-  benchmarks = []
-  profile_analysis = []
-  cleanup = []
+    """generate latency profiles"""
+    benchmarks = []
+    profile_analysis = []
+    cleanup = []
 
-  heading('Latency Profiles: %s' % bm_name)
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
-  for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
-                                       '--benchmark_list_tests']).splitlines():
-    link(line, '%s.txt' % fnize(line))
-    benchmarks.append(
-        jobset.JobSpec(['bins/basicprof/%s' % bm_name,
-                        '--benchmark_filter=^%s$' % line,
-                        '--benchmark_min_time=0.05'],
-                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
-                       shortname='profile-%s' % fnize(line)))
-    profile_analysis.append(
-        jobset.JobSpec([sys.executable,
-                        'tools/profiling/latency_profile/profile_analyzer.py',
-                        '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
-                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
-                        shortname='analyze-%s' % fnize(line)))
-    cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
-    # periodically flush out the list of jobs: profile_analysis jobs at least
-    # consume upwards of five gigabytes of ram in some cases, and so analysing
-    # hundreds of them at once is impractical -- but we want at least some
-    # concurrency or the work takes too long
-    if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
-      # run up to half the cpu count: each benchmark can use up to two cores
-      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
-      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
-      benchmarks = []
-      profile_analysis = []
-      cleanup = []
-  # run the remaining benchmarks that weren't flushed
-  if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
-    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+    heading('Latency Profiles: %s' % bm_name)
+    subprocess.check_call([
+        'make', bm_name, 'CONFIG=basicprof', '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    for line in subprocess.check_output(
+        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+        link(line, '%s.txt' % fnize(line))
+        benchmarks.append(
+            jobset.JobSpec(
+                [
+                    'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
+                    line, '--benchmark_min_time=0.05'
+                ],
+                environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+                shortname='profile-%s' % fnize(line)))
+        profile_analysis.append(
+            jobset.JobSpec(
+                [
+                    sys.executable,
+                    'tools/profiling/latency_profile/profile_analyzer.py',
+                    '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
+                    '--out', 'reports/%s.txt' % fnize(line)
+                ],
+                timeout_seconds=20 * 60,
+                shortname='analyze-%s' % fnize(line)))
+        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+        # periodically flush out the list of jobs: profile_analysis jobs at least
+        # consume upwards of five gigabytes of ram in some cases, and so analysing
+        # hundreds of them at once is impractical -- but we want at least some
+        # concurrency or the work takes too long
+        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
+            # run up to half the cpu count: each benchmark can use up to two cores
+            # (one for the microbenchmark, one for the data flush)
+            jobset.run(
+                benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+            benchmarks = []
+            profile_analysis = []
+            cleanup = []
+    # run the remaining benchmarks that weren't flushed
+    if len(benchmarks):
+        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
 
 def collect_perf(bm_name, args):
-  """generate flamegraphs"""
-  heading('Flamegraphs: %s' % bm_name)
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
-  benchmarks = []
-  profile_analysis = []
-  cleanup = []
-  for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
-                                       '--benchmark_list_tests']).splitlines():
-    link(line, '%s.svg' % fnize(line))
-    benchmarks.append(
-        jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
-                        '-g', '-F', '997',
-                        'bins/mutrace/%s' % bm_name,
-                        '--benchmark_filter=^%s$' % line,
-                        '--benchmark_min_time=10'],
-                        shortname='perf-%s' % fnize(line)))
-    profile_analysis.append(
-        jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
-                       environ = {
-                           'PERF_BASE_NAME': fnize(line),
-                           'OUTPUT_DIR': 'reports',
-                           'OUTPUT_FILENAME': fnize(line),
-                       },
-                       shortname='flame-%s' % fnize(line)))
-    cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
-    cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
-    # periodically flush out the list of jobs: temporary space required for this
-    # processing is large
-    if len(benchmarks) >= 20:
-      # run up to half the cpu count: each benchmark can use up to two cores
-      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1)
-      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
-      benchmarks = []
-      profile_analysis = []
-      cleanup = []
-  # run the remaining benchmarks that weren't flushed
-  if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1)
-    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+    """generate flamegraphs"""
+    heading('Flamegraphs: %s' % bm_name)
+    subprocess.check_call([
+        'make', bm_name, 'CONFIG=mutrace', '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    benchmarks = []
+    profile_analysis = []
+    cleanup = []
+    for line in subprocess.check_output(
+        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+        link(line, '%s.svg' % fnize(line))
+        benchmarks.append(
+            jobset.JobSpec(
+                [
+                    'perf', 'record', '-o', '%s-perf.data' % fnize(
+                        line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
+                    '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
+                ],
+                shortname='perf-%s' % fnize(line)))
+        profile_analysis.append(
+            jobset.JobSpec(
+                [
+                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
+                ],
+                environ={
+                    'PERF_BASE_NAME': fnize(line),
+                    'OUTPUT_DIR': 'reports',
+                    'OUTPUT_FILENAME': fnize(line),
+                },
+                shortname='flame-%s' % fnize(line)))
+        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+        # periodically flush out the list of jobs: temporary space required for this
+        # processing is large
+        if len(benchmarks) >= 20:
+            # run up to half the cpu count: each benchmark can use up to two cores
+            # (one for the microbenchmark, one for the data flush)
+            jobset.run(benchmarks, maxjobs=1)
+            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+            benchmarks = []
+            profile_analysis = []
+            cleanup = []
+    # run the remaining benchmarks that weren't flushed
+    if len(benchmarks):
+        jobset.run(benchmarks, maxjobs=1)
+        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
 
 def run_summary(bm_name, cfg, base_json_name):
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
-  cmd = ['bins/%s/%s' % (cfg, bm_name),
-         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
-         '--benchmark_out_format=json']
-  if args.summary_time is not None:
-    cmd += ['--benchmark_min_time=%d' % args.summary_time]
-  return subprocess.check_output(cmd)
+    subprocess.check_call([
+        'make', bm_name, 'CONFIG=%s' % cfg, '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    cmd = [
+        'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
+        (base_json_name, cfg), '--benchmark_out_format=json'
+    ]
+    if args.summary_time is not None:
+        cmd += ['--benchmark_min_time=%d' % args.summary_time]
+    return subprocess.check_output(cmd)
+
 
 def collect_summary(bm_name, args):
-  heading('Summary: %s [no counters]' % bm_name)
-  text(run_summary(bm_name, 'opt', bm_name))
-  heading('Summary: %s [with counters]' % bm_name)
-  text(run_summary(bm_name, 'counters', bm_name))
-  if args.bigquery_upload:
-    with open('%s.csv' % bm_name, 'w') as f:
-      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
-                                       '%s.counters.json' % bm_name,
-                                       '%s.opt.json' % bm_name]))
-    subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])
+    heading('Summary: %s [no counters]' % bm_name)
+    text(run_summary(bm_name, 'opt', bm_name))
+    heading('Summary: %s [with counters]' % bm_name)
+    text(run_summary(bm_name, 'counters', bm_name))
+    if args.bigquery_upload:
+        with open('%s.csv' % bm_name, 'w') as f:
+            f.write(
+                subprocess.check_output([
+                    'tools/profiling/microbenchmarks/bm2bq.py',
+                    '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
+                ]))
+        subprocess.check_call([
+            'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
+        ])
+
 
 collectors = {
-  'latency': collect_latency,
-  'perf': collect_perf,
-  'summary': collect_summary,
+    'latency': collect_latency,
+    'perf': collect_perf,
+    'summary': collect_summary,
 }
 
 argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
-argp.add_argument('-c', '--collect',
-                  choices=sorted(collectors.keys()),
-                  nargs='*',
-                  default=sorted(collectors.keys()),
-                  help='Which collectors should be run against each benchmark')
-argp.add_argument('-b', '--benchmarks',
-                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-                  nargs='+',
-                  type=str,
-                  help='Which microbenchmarks should be run')
-argp.add_argument('--bigquery_upload',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Upload results from summary collection to bigquery')
-argp.add_argument('--summary_time',
-                  default=None,
-                  type=int,
-                  help='Minimum time to run benchmarks for the summary collection')
+argp.add_argument(
+    '-c',
+    '--collect',
+    choices=sorted(collectors.keys()),
+    nargs='*',
+    default=sorted(collectors.keys()),
+    help='Which collectors should be run against each benchmark')
+argp.add_argument(
+    '-b',
+    '--benchmarks',
+    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    nargs='+',
+    type=str,
+    help='Which microbenchmarks should be run')
+argp.add_argument(
+    '--bigquery_upload',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Upload results from summary collection to bigquery')
+argp.add_argument(
+    '--summary_time',
+    default=None,
+    type=int,
+    help='Minimum time to run benchmarks for the summary collection')
 args = argp.parse_args()
 
 try:
-  for collect in args.collect:
-    for bm_name in args.benchmarks:
-      collectors[collect](bm_name, args)
+    for collect in args.collect:
+        for bm_name in args.benchmarks:
+            collectors[collect](bm_name, args)
 finally:
-  if not os.path.exists('reports'):
-    os.makedirs('reports')
-  index_html += "</body>\n</html>\n"
-  with open('reports/index.html', 'w') as f:
-    f.write(index_html)
+    if not os.path.exists('reports'):
+        os.makedirs('reports')
+    index_html += "</body>\n</html>\n"
+    with open('reports/index.html', 'w') as f:
+        f.write(index_html)
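
As a quick sanity check of the reformatted fnize() helper above, here is a standalone sketch (the benchmark name in the assertion is only illustrative, not taken from the repository): characters in '<>, /' are collapsed into single underscores so a benchmark name becomes a usable trace/report file name.

# Standalone sketch of the fnize() helper shown in the diff above.
def fnize(s):
    out = ''
    for c in s:
        if c in '<>, /':
            # collapse consecutive separator characters into a single underscore
            if len(out) and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out

if __name__ == '__main__':
    # Illustrative benchmark-style name; any string containing '<>, /' characters works.
    assert fnize('BM_PingPong<TCP, NoOpMutator>/0/512') == 'BM_PingPong_TCP_NoOpMutator_0_512'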
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index aa305be..03b684b 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run performance tests locally or remotely."""
 
 from __future__ import print_function
@@ -37,566 +36,666 @@
 import python_utils.jobset as jobset
 import python_utils.report_utils as report_utils
 
-
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-
 _REMOTE_HOST_USERNAME = 'jenkins'
 
 
 class QpsWorkerJob:
-  """Encapsulates a qps worker server job."""
+    """Encapsulates a qps worker server job."""
 
-  def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
-    self._spec = spec
-    self.language = language
-    self.host_and_port = host_and_port
-    self._job = None
-    self.perf_file_base_name = perf_file_base_name
+    def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
+        self._spec = spec
+        self.language = language
+        self.host_and_port = host_and_port
+        self._job = None
+        self.perf_file_base_name = perf_file_base_name
 
-  def start(self):
-    self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={})
+    def start(self):
+        self._job = jobset.Job(
+            self._spec, newline_on_success=True, travis=True, add_env={})
 
-  def is_running(self):
-    """Polls a job and returns True if given job is still running."""
-    return self._job and self._job.state() == jobset._RUNNING
+    def is_running(self):
+        """Polls a job and returns True if given job is still running."""
+        return self._job and self._job.state() == jobset._RUNNING
 
-  def kill(self):
-    if self._job:
-      self._job.kill()
-      self._job = None
+    def kill(self):
+        if self._job:
+            self._job.kill()
+            self._job = None
 
 
-def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None):
-  cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+def create_qpsworker_job(language,
+                         shortname=None,
+                         port=10000,
+                         remote_host=None,
+                         perf_cmd=None):
+    cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
 
-  if remote_host:
-    host_and_port='%s:%s' % (remote_host, port)
-  else:
-    host_and_port='localhost:%s' % port
+    if remote_host:
+        host_and_port = '%s:%s' % (remote_host, port)
+    else:
+        host_and_port = 'localhost:%s' % port
 
-  perf_file_base_name = None
-  if perf_cmd:
-    perf_file_base_name = '%s-%s' % (host_and_port, shortname)
-    # specify -o output file so perf.data gets collected when worker stopped
-    cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline
+    perf_file_base_name = None
+    if perf_cmd:
+        perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+        # specify -o output file so perf.data gets collected when the worker is stopped
+        cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
+                             ] + cmdline
 
-  worker_timeout = 3 * 60
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    ssh_cmd = ['ssh']
-    cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
-    ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s' % ' '.join(cmdline)])
-    cmdline = ssh_cmd
+    worker_timeout = 3 * 60
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        ssh_cmd = ['ssh']
+        cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
+        ssh_cmd.extend([
+            str(user_at_host),
+            'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
+            % ' '.join(cmdline)
+        ])
+        cmdline = ssh_cmd
 
-  jobspec = jobset.JobSpec(
-      cmdline=cmdline,
-      shortname=shortname,
-      timeout_seconds=worker_timeout,  # workers get restarted after each scenario
-      verbose_success=True)
-  return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        shortname=shortname,
+        timeout_seconds=worker_timeout,  # workers get restarted after each scenario
+        verbose_success=True)
+    return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
 
 
-def create_scenario_jobspec(scenario_json, workers, remote_host=None,
-                            bq_result_table=None, server_cpu_load=0):
-  """Runs one scenario using QPS driver."""
-  # setting QPS_WORKERS env variable here makes sure it works with SSH too.
-  cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
-  if bq_result_table:
-    cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
-  cmd += 'tools/run_tests/performance/run_qps_driver.sh '
-  cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json '
-  if server_cpu_load != 0:
-      cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+def create_scenario_jobspec(scenario_json,
+                            workers,
+                            remote_host=None,
+                            bq_result_table=None,
+                            server_cpu_load=0):
+    """Runs one scenario using QPS driver."""
+    # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+    cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
+    if bq_result_table:
+        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+    cmd += 'tools/run_tests/performance/run_qps_driver.sh '
+    cmd += '--scenarios_json=%s ' % pipes.quote(
+        json.dumps({
+            'scenarios': [scenario_json]
+        }))
+    cmd += '--scenario_result_file=scenario_result.json '
+    if server_cpu_load != 0:
+        cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='qps_json_driver.%s' % scenario_json['name'],
-      timeout_seconds=12*60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='qps_json_driver.%s' % scenario_json['name'],
+        timeout_seconds=12 * 60,
+        shell=True,
+        verbose_success=True)
 
 
 def create_quit_jobspec(workers, remote_host=None):
-  """Runs quit using QPS driver."""
-  # setting QPS_WORKERS env variable here makes sure it works with SSH too.
-  cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(w.host_and_port for w in workers)
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+    """Runs quit using QPS driver."""
+    # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+    cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
+        w.host_and_port for w in workers)
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='qps_json_driver.quit',
-      timeout_seconds=3*60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='qps_json_driver.quit',
+        timeout_seconds=3 * 60,
+        shell=True,
+        verbose_success=True)
 
 
-def create_netperf_jobspec(server_host='localhost', client_host=None,
+def create_netperf_jobspec(server_host='localhost',
+                           client_host=None,
                            bq_result_table=None):
-  """Runs netperf benchmark."""
-  cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
-  if bq_result_table:
-    cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
-  if client_host:
-    # If netperf is running remotely, the env variables populated by Jenkins
-    # won't be available on the client, but we need them for uploading results
-    # to BigQuery.
-    jenkins_job_name = os.getenv('JOB_NAME')
-    if jenkins_job_name:
-      cmd += 'JOB_NAME="%s" ' % jenkins_job_name
-    jenkins_build_number = os.getenv('BUILD_NUMBER')
-    if jenkins_build_number:
-      cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
+    """Runs netperf benchmark."""
+    cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+    if bq_result_table:
+        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+    if client_host:
+        # If netperf is running remotely, the env variables populated by Jenkins
+        # won't be available on the client, but we need them for uploading results
+        # to BigQuery.
+        jenkins_job_name = os.getenv('JOB_NAME')
+        if jenkins_job_name:
+            cmd += 'JOB_NAME="%s" ' % jenkins_job_name
+        jenkins_build_number = os.getenv('BUILD_NUMBER')
+        if jenkins_build_number:
+            cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
 
-  cmd += 'tools/run_tests/performance/run_netperf.sh'
-  if client_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+    cmd += 'tools/run_tests/performance/run_netperf.sh'
+    if client_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='netperf',
-      timeout_seconds=60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='netperf',
+        timeout_seconds=60,
+        shell=True,
+        verbose_success=True)
 
 
 def archive_repo(languages):
-  """Archives local version of repo including submodules."""
-  cmdline=['tar', '-cf', '../grpc.tar', '../grpc/']
-  if 'java' in languages:
-    cmdline.append('../grpc-java')
-  if 'go' in languages:
-    cmdline.append('../grpc-go')
+    """Archives local version of repo including submodules."""
+    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
+    if 'java' in languages:
+        cmdline.append('../grpc-java')
+    if 'go' in languages:
+        cmdline.append('../grpc-go')
 
-  archive_job = jobset.JobSpec(
-      cmdline=cmdline,
-      shortname='archive_repo',
-      timeout_seconds=3*60)
+    archive_job = jobset.JobSpec(
+        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
 
-  jobset.message('START', 'Archiving local repository.', do_newline=True)
-  num_failures, _ = jobset.run(
-      [archive_job], newline_on_success=True, maxjobs=1)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Archive with local repository created successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Failed to archive local repository.',
-                   do_newline=True)
-    sys.exit(1)
+    jobset.message('START', 'Archiving local repository.', do_newline=True)
+    num_failures, _ = jobset.run(
+        [archive_job], newline_on_success=True, maxjobs=1)
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS',
+            'Archive with local repository created successfully.',
+            do_newline=True)
+    else:
+        jobset.message(
+            'FAILED', 'Failed to archive local repository.', do_newline=True)
+        sys.exit(1)
 
 
 def prepare_remote_hosts(hosts, prepare_local=False):
-  """Prepares remote hosts (and maybe prepare localhost as well)."""
-  prepare_timeout = 10*60
-  prepare_jobs = []
-  for host in hosts:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
-    prepare_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
-            shortname='remote_host_prepare.%s' % host,
-            environ = {'USER_AT_HOST': user_at_host},
-            timeout_seconds=prepare_timeout))
-  if prepare_local:
-    # Prepare localhost as well
-    prepare_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/kill_workers.sh'],
-            shortname='local_prepare',
-            timeout_seconds=prepare_timeout))
-  jobset.message('START', 'Preparing hosts.', do_newline=True)
-  num_failures, _ = jobset.run(
-      prepare_jobs, newline_on_success=True, maxjobs=10)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Prepare step completed successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Failed to prepare remote hosts.',
-                   do_newline=True)
-    sys.exit(1)
+    """Prepares remote hosts (and maybe prepare localhost as well)."""
+    prepare_timeout = 10 * 60
+    prepare_jobs = []
+    for host in hosts:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+        prepare_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+                shortname='remote_host_prepare.%s' % host,
+                environ={'USER_AT_HOST': user_at_host},
+                timeout_seconds=prepare_timeout))
+    if prepare_local:
+        # Prepare localhost as well
+        prepare_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/kill_workers.sh'],
+                shortname='local_prepare',
+                timeout_seconds=prepare_timeout))
+    jobset.message('START', 'Preparing hosts.', do_newline=True)
+    num_failures, _ = jobset.run(
+        prepare_jobs, newline_on_success=True, maxjobs=10)
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
+    else:
+        jobset.message(
+            'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
+        sys.exit(1)
 
 
-def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
-  """Builds performance worker on remote hosts (and maybe also locally)."""
-  build_timeout = 15*60
-  # Kokoro VMs (which are local only) do not have caching, so they need more time to build
-  local_build_timeout = 30*60
-  build_jobs = []
-  for host in hosts:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
-    build_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
-            shortname='remote_host_build.%s' % host,
-            environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
-            timeout_seconds=build_timeout))
-  if build_local:
-    # Build locally as well
-    build_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
-            shortname='local_build',
-            environ = {'CONFIG': 'opt'},
-            timeout_seconds=local_build_timeout))
-  jobset.message('START', 'Building.', do_newline=True)
-  num_failures, _ = jobset.run(
-      build_jobs, newline_on_success=True, maxjobs=10)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Built successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Build failed.',
-                   do_newline=True)
-    sys.exit(1)
+def build_on_remote_hosts(hosts,
+                          languages=scenario_config.LANGUAGES.keys(),
+                          build_local=False):
+    """Builds performance worker on remote hosts (and maybe also locally)."""
+    build_timeout = 15 * 60
+    # Kokoro VMs (which are local only) do not have caching, so they need more time to build
+    local_build_timeout = 30 * 60
+    build_jobs = []
+    for host in hosts:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+        build_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
+                languages,
+                shortname='remote_host_build.%s' % host,
+                environ={'USER_AT_HOST': user_at_host,
+                         'CONFIG': 'opt'},
+                timeout_seconds=build_timeout))
+    if build_local:
+        # Build locally as well
+        build_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/build_performance.sh'] +
+                languages,
+                shortname='local_build',
+                environ={'CONFIG': 'opt'},
+                timeout_seconds=local_build_timeout))
+    jobset.message('START', 'Building.', do_newline=True)
+    num_failures, _ = jobset.run(
+        build_jobs, newline_on_success=True, maxjobs=10)
+    if num_failures == 0:
+        jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
+    else:
+        jobset.message('FAILED', 'Build failed.', do_newline=True)
+        sys.exit(1)
 
 
 def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
-  """Creates QPS workers (but does not start them)."""
-  if not worker_hosts:
-    # run two workers locally (for each language)
-    workers=[(None, 10000), (None, 10010)]
-  elif len(worker_hosts) == 1:
-    # run two workers on the remote host (for each language)
-    workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
-  else:
-    # run one worker per each remote host (for each language)
-    workers=[(worker_host, 10000) for worker_host in worker_hosts]
+    """Creates QPS workers (but does not start them)."""
+    if not worker_hosts:
+        # run two workers locally (for each language)
+        workers = [(None, 10000), (None, 10010)]
+    elif len(worker_hosts) == 1:
+        # run two workers on the remote host (for each language)
+        workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+    else:
+        # run one worker per remote host (for each language)
+        workers = [(worker_host, 10000) for worker_host in worker_hosts]
 
-  return [create_qpsworker_job(language,
-                               shortname= 'qps_worker_%s_%s' % (language,
-                                                                worker_idx),
-                               port=worker[1] + language.worker_port_offset(),
-                               remote_host=worker[0],
-                               perf_cmd=perf_cmd)
-          for language in languages
-          for worker_idx, worker in enumerate(workers)]
+    return [
+        create_qpsworker_job(
+            language,
+            shortname='qps_worker_%s_%s' % (language, worker_idx),
+            port=worker[1] + language.worker_port_offset(),
+            remote_host=worker[0],
+            perf_cmd=perf_cmd)
+        for language in languages for worker_idx, worker in enumerate(workers)
+    ]
 
 
-def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
-  print('Creating perf report collection job for %s' % worker_host)
-  cmd = ''
-  if worker_host != 'localhost':
-    user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
-    cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
-         tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
-          % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
-  else:
-    cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
-          tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
-          % (output_filename, flame_graph_reports, perf_base_name)
+def perf_report_processor_job(worker_host, perf_base_name, output_filename,
+                              flame_graph_reports):
+    print('Creating perf report collection job for %s' % worker_host)
+    cmd = ''
+    if worker_host != 'localhost':
+        user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+        cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
+            user_at_host, output_filename, flame_graph_reports, perf_base_name)
+    else:
+        cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
+            output_filename, flame_graph_reports, perf_base_name)
 
-  return jobset.JobSpec(cmdline=cmd,
-                        timeout_seconds=3*60,
-                        shell=True,
-                        verbose_success=True,
-                        shortname='process perf report')
+    return jobset.JobSpec(
+        cmdline=cmd,
+        timeout_seconds=3 * 60,
+        shell=True,
+        verbose_success=True,
+        shortname='process perf report')
 
 
 Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
 
 
-def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
-                     category='all', bq_result_table=None,
-                     netperf=False, netperf_hosts=[], server_cpu_load=0):
-  """Create jobspecs for scenarios to run."""
-  all_workers = [worker
-                 for workers in workers_by_lang.values()
-                 for worker in workers]
-  scenarios = []
-  _NO_WORKERS = []
+def create_scenarios(languages,
+                     workers_by_lang,
+                     remote_host=None,
+                     regex='.*',
+                     category='all',
+                     bq_result_table=None,
+                     netperf=False,
+                     netperf_hosts=[],
+                     server_cpu_load=0):
+    """Create jobspecs for scenarios to run."""
+    all_workers = [
+        worker for workers in workers_by_lang.values() for worker in workers
+    ]
+    scenarios = []
+    _NO_WORKERS = []
 
-  if netperf:
-    if not netperf_hosts:
-      netperf_server='localhost'
-      netperf_client=None
-    elif len(netperf_hosts) == 1:
-      netperf_server=netperf_hosts[0]
-      netperf_client=netperf_hosts[0]
-    else:
-      netperf_server=netperf_hosts[0]
-      netperf_client=netperf_hosts[1]
-    scenarios.append(Scenario(
-        create_netperf_jobspec(server_host=netperf_server,
-                               client_host=netperf_client,
-                               bq_result_table=bq_result_table),
-        _NO_WORKERS, 'netperf'))
+    if netperf:
+        if not netperf_hosts:
+            netperf_server = 'localhost'
+            netperf_client = None
+        elif len(netperf_hosts) == 1:
+            netperf_server = netperf_hosts[0]
+            netperf_client = netperf_hosts[0]
+        else:
+            netperf_server = netperf_hosts[0]
+            netperf_client = netperf_hosts[1]
+        scenarios.append(
+            Scenario(
+                create_netperf_jobspec(
+                    server_host=netperf_server,
+                    client_host=netperf_client,
+                    bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
 
-  for language in languages:
-    for scenario_json in language.scenarios():
-      if re.search(regex, scenario_json['name']):
-        categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
-        if category in categories or category == 'all':
-          workers = workers_by_lang[str(language)][:]
-          # 'SERVER_LANGUAGE' is an indicator for this script to pick
-          # a server in different language.
-          custom_server_lang = scenario_json.get('SERVER_LANGUAGE', None)
-          custom_client_lang = scenario_json.get('CLIENT_LANGUAGE', None)
-          scenario_json = scenario_config.remove_nonproto_fields(scenario_json)
-          if custom_server_lang and custom_client_lang:
-            raise Exception('Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
+    for language in languages:
+        for scenario_json in language.scenarios():
+            if re.search(regex, scenario_json['name']):
+                categories = scenario_json.get('CATEGORIES',
+                                               ['scalable', 'smoketest'])
+                if category in categories or category == 'all':
+                    workers = workers_by_lang[str(language)][:]
+                    # 'SERVER_LANGUAGE' is an indicator for this script to pick
+                    # a server in a different language.
+                    custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
+                                                           None)
+                    custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
+                                                           None)
+                    scenario_json = scenario_config.remove_nonproto_fields(
+                        scenario_json)
+                    if custom_server_lang and custom_client_lang:
+                        raise Exception(
+                            'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE '
                             'in the same scenario')
-          if custom_server_lang:
-            if not workers_by_lang.get(custom_server_lang, []):
-              print('Warning: Skipping scenario %s as' % scenario_json['name'])
-              print('SERVER_LANGUAGE is set to %s yet the language has '
-                    'not been selected with -l' % custom_server_lang)
-              continue
-            for idx in range(0, scenario_json['num_servers']):
-              # replace first X workers by workers of a different language
-              workers[idx] = workers_by_lang[custom_server_lang][idx]
-          if custom_client_lang:
-            if not workers_by_lang.get(custom_client_lang, []):
-              print('Warning: Skipping scenario %s as' % scenario_json['name'])
-              print('CLIENT_LANGUAGE is set to %s yet the language has '
-                    'not been selected with -l' % custom_client_lang)
-              continue
-            for idx in range(scenario_json['num_servers'], len(workers)):
-              # replace all client workers by workers of a different language,
-              # leave num_server workers as they are server workers.
-              workers[idx] = workers_by_lang[custom_client_lang][idx]
-          scenario = Scenario(
-              create_scenario_jobspec(scenario_json,
-                                      [w.host_and_port for w in workers],
-                                      remote_host=remote_host,
-                                      bq_result_table=bq_result_table,
-                                      server_cpu_load=server_cpu_load),
-              workers,
-              scenario_json['name'])
-          scenarios.append(scenario)
+                    if custom_server_lang:
+                        if not workers_by_lang.get(custom_server_lang, []):
+                            print('Warning: Skipping scenario %s as' %
+                                  scenario_json['name'])
+                            print(
+                                'SERVER_LANGUAGE is set to %s yet the language has '
+                                'not been selected with -l' %
+                                custom_server_lang)
+                            continue
+                        for idx in range(0, scenario_json['num_servers']):
+                            # replace first X workers by workers of a different language
+                            workers[idx] = workers_by_lang[custom_server_lang][
+                                idx]
+                    if custom_client_lang:
+                        if not workers_by_lang.get(custom_client_lang, []):
+                            print('Warning: Skipping scenario %s as' %
+                                  scenario_json['name'])
+                            print(
+                                'CLIENT_LANGUAGE is set to %s yet the language has '
+                                'not been selected with -l' %
+                                custom_client_lang)
+                            continue
+                        for idx in range(scenario_json['num_servers'],
+                                         len(workers)):
+                            # replace all client workers by workers of a different language,
+                            # leave num_server workers as they are server workers.
+                            workers[idx] = workers_by_lang[custom_client_lang][
+                                idx]
+                    scenario = Scenario(
+                        create_scenario_jobspec(
+                            scenario_json, [w.host_and_port for w in workers],
+                            remote_host=remote_host,
+                            bq_result_table=bq_result_table,
+                            server_cpu_load=server_cpu_load), workers,
+                        scenario_json['name'])
+                    scenarios.append(scenario)
 
-  return scenarios
+    return scenarios
 
 
 def finish_qps_workers(jobs, qpsworker_jobs):
-  """Waits for given jobs to finish and eventually kills them."""
-  retries = 0
-  num_killed = 0
-  while any(job.is_running() for job in jobs):
-    for job in qpsworker_jobs:
-      if job.is_running():
-        print('QPS worker "%s" is still running.' % job.host_and_port)
-    if retries > 10:
-      print('Killing all QPS workers.')
-      for job in jobs:
-        job.kill()
-        num_killed += 1
-    retries += 1
-    time.sleep(3)
-  print('All QPS workers finished.')
-  return num_killed
+    """Waits for given jobs to finish and eventually kills them."""
+    retries = 0
+    num_killed = 0
+    while any(job.is_running() for job in jobs):
+        for job in qpsworker_jobs:
+            if job.is_running():
+                print('QPS worker "%s" is still running.' % job.host_and_port)
+        if retries > 10:
+            print('Killing all QPS workers.')
+            for job in jobs:
+                job.kill()
+                num_killed += 1
+        retries += 1
+        time.sleep(3)
+    print('All QPS workers finished.')
+    return num_killed
+
 
 profile_output_files = []
 
+
 # Collect perf text reports and flamegraphs if perf_cmd was used
 # Note the base names of perf text reports are used when creating and processing
 # perf data. The scenario name uniquifies the output name in the final
 # perf reports directory.
 # Also, the perf profiles need to be fetched and processed after each scenario
 # in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
-  perf_report_jobs = []
-  global profile_output_files
-  for host_and_port in hosts_and_base_names:
-    perf_base_name = hosts_and_base_names[host_and_port]
-    output_filename = '%s-%s' % (scenario_name, perf_base_name)
-    # from the base filename, create .svg output filename
-    host = host_and_port.split(':')[0]
-    profile_output_files.append('%s.svg' % output_filename)
-    perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
+                                  flame_graph_reports):
+    perf_report_jobs = []
+    global profile_output_files
+    for host_and_port in hosts_and_base_names:
+        perf_base_name = hosts_and_base_names[host_and_port]
+        output_filename = '%s-%s' % (scenario_name, perf_base_name)
+        # from the base filename, create .svg output filename
+        host = host_and_port.split(':')[0]
+        profile_output_files.append('%s.svg' % output_filename)
+        perf_report_jobs.append(
+            perf_report_processor_job(host, perf_base_name, output_filename,
+                                      flame_graph_reports))
 
-  jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
-  failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
-  jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
-  return failures
+    jobset.message(
+        'START', 'Collecting perf reports from qps workers', do_newline=True)
+    failures, _ = jobset.run(
+        perf_report_jobs, newline_on_success=True, maxjobs=1)
+    jobset.message(
+        'END', 'Collecting perf reports from qps workers', do_newline=True)
+    return failures
+
 
 def main():
-  argp = argparse.ArgumentParser(description='Run performance tests.')
-  argp.add_argument('-l', '--language',
-                    choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
-                    nargs='+',
-                    required=True,
-                    help='Languages to benchmark.')
-  argp.add_argument('--remote_driver_host',
-                    default=None,
-                    help='Run QPS driver on given host. By default, QPS driver is run locally.')
-  argp.add_argument('--remote_worker_host',
-                    nargs='+',
-                    default=[],
-                    help='Worker hosts where to start QPS workers.')
-  argp.add_argument('--dry_run',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Just list scenarios to be run, but don\'t run them.')
-  argp.add_argument('-r', '--regex', default='.*', type=str,
-                    help='Regex to select scenarios to run.')
-  argp.add_argument('--bq_result_table', default=None, type=str,
-                    help='Bigquery "dataset.table" to upload results to.')
-  argp.add_argument('--category',
-                    choices=['smoketest','all','scalable','sweep'],
-                    default='all',
-                    help='Select a category of tests to run.')
-  argp.add_argument('--netperf',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Run netperf benchmark as one of the scenarios.')
-  argp.add_argument('--server_cpu_load',
-                    default=0, type=int,
-                    help='Select a targeted server cpu load to run. 0 means ignore this flag')
-  argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
-                    help='Name of XML report file to generate.')
-  argp.add_argument('--perf_args',
-                    help=('Example usage: "--perf_args=record -F 99 -g". '
-                          'Wrap QPS workers in a perf command '
-                          'with the arguments to perf specified here. '
-                          '".svg" flame graph profiles will be '
-                          'created for each Qps Worker on each scenario. '
-                          'Files will output to "<repo_root>/<args.flame_graph_reports>" '
-                          'directory. Output files from running the worker '
-                          'under perf are saved in the repo root where its ran. '
-                          'Note that the perf "-g" flag is necessary for '
-                          'flame graphs generation to work (assuming the binary '
-                          'being profiled uses frame pointers, check out '
-                          '"--call-graph dwarf" option using libunwind otherwise.) '
-                          'Also note that the entire "--perf_args=<arg(s)>" must '
-                          'be wrapped in quotes as in the example usage. '
-                          'If the "--perg_args" is unspecified, "perf" will '
-                          'not be used at all. '
-                          'See http://www.brendangregg.com/perf.html '
-                          'for more general perf examples.'))
-  argp.add_argument('--skip_generate_flamegraphs',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help=('Turn flame graph generation off. '
-                          'May be useful if "perf_args" arguments do not make sense for '
-                          'generating flamegraphs (e.g., "--perf_args=stat ...")'))
-  argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
-                    help='Name of directory to output flame graph profiles to, if any are created.')
-  argp.add_argument('-u', '--remote_host_username', default='', type=str,
-                    help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
+    argp = argparse.ArgumentParser(description='Run performance tests.')
+    argp.add_argument(
+        '-l',
+        '--language',
+        choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+        nargs='+',
+        required=True,
+        help='Languages to benchmark.')
+    argp.add_argument(
+        '--remote_driver_host',
+        default=None,
+        help='Run the QPS driver on the given host. By default, the QPS driver is run locally.'
+    )
+    argp.add_argument(
+        '--remote_worker_host',
+        nargs='+',
+        default=[],
+        help='Worker hosts on which to start QPS workers.')
+    argp.add_argument(
+        '--dry_run',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Just list scenarios to be run, but don\'t run them.')
+    argp.add_argument(
+        '-r',
+        '--regex',
+        default='.*',
+        type=str,
+        help='Regex to select scenarios to run.')
+    argp.add_argument(
+        '--bq_result_table',
+        default=None,
+        type=str,
+        help='Bigquery "dataset.table" to upload results to.')
+    argp.add_argument(
+        '--category',
+        choices=['smoketest', 'all', 'scalable', 'sweep'],
+        default='all',
+        help='Select a category of tests to run.')
+    argp.add_argument(
+        '--netperf',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Run netperf benchmark as one of the scenarios.')
+    argp.add_argument(
+        '--server_cpu_load',
+        default=0,
+        type=int,
+        help='Select a targeted server CPU load to run. 0 means ignore this flag.'
+    )
+    argp.add_argument(
+        '-x',
+        '--xml_report',
+        default='report.xml',
+        type=str,
+        help='Name of XML report file to generate.')
+    argp.add_argument(
+        '--perf_args',
+        help=('Example usage: "--perf_args=record -F 99 -g". '
+              'Wrap QPS workers in a perf command '
+              'with the arguments to perf specified here. '
+              '".svg" flame graph profiles will be '
+              'created for each QPS worker on each scenario. '
+              'Files will be output to the "<repo_root>/<args.flame_graph_reports>" '
+              'directory. Output files from running the worker '
+              'under perf are saved in the repo root where it is run. '
+              'Note that the perf "-g" flag is necessary for '
+              'flame graph generation to work (assuming the binary '
+              'being profiled uses frame pointers; check out '
+              'the "--call-graph dwarf" option using libunwind otherwise.) '
+              'Also note that the entire "--perf_args=<arg(s)>" must '
+              'be wrapped in quotes as in the example usage. '
+              'If "--perf_args" is unspecified, "perf" will '
+              'not be used at all. '
+              'See http://www.brendangregg.com/perf.html '
+              'for more general perf examples.'))
+    argp.add_argument(
+        '--skip_generate_flamegraphs',
+        default=False,
+        action='store_const',
+        const=True,
+        help=('Turn flame graph generation off. '
+              'May be useful if "perf_args" arguments do not make sense for '
+              'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+    argp.add_argument(
+        '-f',
+        '--flame_graph_reports',
+        default='perf_reports',
+        type=str,
+        help='Name of directory to output flame graph profiles to, if any are created.'
+    )
+    argp.add_argument(
+        '-u',
+        '--remote_host_username',
+        default='',
+        type=str,
+        help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
 
-  args = argp.parse_args()
+    args = argp.parse_args()
 
-  global _REMOTE_HOST_USERNAME
-  if args.remote_host_username:
-    _REMOTE_HOST_USERNAME = args.remote_host_username
+    global _REMOTE_HOST_USERNAME
+    if args.remote_host_username:
+        _REMOTE_HOST_USERNAME = args.remote_host_username
 
-  languages = set(scenario_config.LANGUAGES[l]
-                  for l in itertools.chain.from_iterable(
-                        six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
-                        else [x] for x in args.language))
+    languages = set(
+        scenario_config.LANGUAGES[l]
+        for l in itertools.chain.from_iterable(
+            six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
+            for x in args.language))
 
+    # Put together the set of remote hosts on which to run and build
+    remote_hosts = set()
+    if args.remote_worker_host:
+        for host in args.remote_worker_host:
+            remote_hosts.add(host)
+    if args.remote_driver_host:
+        remote_hosts.add(args.remote_driver_host)
 
-  # Put together set of remote hosts where to run and build
-  remote_hosts = set()
-  if args.remote_worker_host:
-    for host in args.remote_worker_host:
-      remote_hosts.add(host)
-  if args.remote_driver_host:
-    remote_hosts.add(args.remote_driver_host)
+    if not args.dry_run:
+        if remote_hosts:
+            archive_repo(languages=[str(l) for l in languages])
+            prepare_remote_hosts(remote_hosts, prepare_local=True)
+        else:
+            prepare_remote_hosts([], prepare_local=True)
 
-  if not args.dry_run:
-    if remote_hosts:
-      archive_repo(languages=[str(l) for l in languages])
-      prepare_remote_hosts(remote_hosts, prepare_local=True)
-    else:
-      prepare_remote_hosts([], prepare_local=True)
+    build_local = False
+    if not args.remote_driver_host:
+        build_local = True
+    if not args.dry_run:
+        build_on_remote_hosts(
+            remote_hosts,
+            languages=[str(l) for l in languages],
+            build_local=build_local)
 
-  build_local = False
-  if not args.remote_driver_host:
-    build_local = True
-  if not args.dry_run:
-    build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
+    perf_cmd = None
+    if args.perf_args:
+        print('Running workers under perf profiler')
+        # Expect perf to be installed at /usr/bin/perf, as is usual
+        perf_cmd = ['/usr/bin/perf']
+        perf_cmd.extend(re.split('\s+', args.perf_args))
 
-  perf_cmd = None
-  if args.perf_args:
-    print('Running workers under perf profiler')
-    # Expect /usr/bin/perf to be installed here, as is usual
-    perf_cmd = ['/usr/bin/perf']
-    perf_cmd.extend(re.split('\s+', args.perf_args))
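As a side note, here is a minimal sketch of how a perf command assembled this way ends up wrapping a QPS worker invocation. The worker command line and output file name are hypothetical, and the actual wrapping (and perf output naming) is done by create_qpsworkers elsewhere in this script, so treat the layout as an assumption for illustration only:

    import re

    perf_args = 'record -F 99 -g'  # as passed via --perf_args
    worker_cmdline = ['bins/opt/qps_worker', '--driver_port=10000']  # hypothetical worker command

    perf_cmd = ['/usr/bin/perf'] + re.split(r'\s+', perf_args)
    # Hypothetical wrapping: profile data goes to a per-worker file while the worker runs.
    wrapped_cmdline = perf_cmd + ['-o', 'worker_0.perf.data', '--'] + worker_cmdline
    print(' '.join(wrapped_cmdline))
    # /usr/bin/perf record -F 99 -g -o worker_0.perf.data -- bins/opt/qps_worker --driver_port=10000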
+    qpsworker_jobs = create_qpsworkers(
+        languages, args.remote_worker_host, perf_cmd=perf_cmd)
 
-  qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
+    # group the qps worker jobs by language.
+    workers_by_lang = dict([(str(language), []) for language in languages])
+    for job in qpsworker_jobs:
+        workers_by_lang[str(job.language)].append(job)
 
-  # get list of worker addresses for each language.
-  workers_by_lang = dict([(str(language), []) for language in languages])
-  for job in qpsworker_jobs:
-    workers_by_lang[str(job.language)].append(job)
+    scenarios = create_scenarios(
+        languages,
+        workers_by_lang=workers_by_lang,
+        remote_host=args.remote_driver_host,
+        regex=args.regex,
+        category=args.category,
+        bq_result_table=args.bq_result_table,
+        netperf=args.netperf,
+        netperf_hosts=args.remote_worker_host,
+        server_cpu_load=args.server_cpu_load)
 
-  scenarios = create_scenarios(languages,
-                             workers_by_lang=workers_by_lang,
-                             remote_host=args.remote_driver_host,
-                             regex=args.regex,
-                             category=args.category,
-                             bq_result_table=args.bq_result_table,
-                             netperf=args.netperf,
-                             netperf_hosts=args.remote_worker_host,
-                             server_cpu_load=args.server_cpu_load)
+    if not scenarios:
+        raise Exception('No scenarios to run')
 
-  if not scenarios:
-    raise Exception('No scenarios to run')
+    total_scenario_failures = 0
+    qps_workers_killed = 0
+    merged_resultset = {}
+    perf_report_failures = 0
 
-  total_scenario_failures = 0
-  qps_workers_killed = 0
-  merged_resultset = {}
-  perf_report_failures = 0
+    for scenario in scenarios:
+        if args.dry_run:
+            print(scenario.name)
+        else:
+            scenario_failures = 0
+            try:
+                for worker in scenario.workers:
+                    worker.start()
+                jobs = [scenario.jobspec]
+                if scenario.workers:
+                    jobs.append(
+                        create_quit_jobspec(
+                            scenario.workers,
+                            remote_host=args.remote_driver_host))
+                scenario_failures, resultset = jobset.run(
+                    jobs, newline_on_success=True, maxjobs=1)
+                total_scenario_failures += scenario_failures
+                merged_resultset = dict(
+                    itertools.chain(
+                        six.iteritems(merged_resultset),
+                        six.iteritems(resultset)))
+            finally:
+                # Consider qps workers that need to be killed as failures
+                qps_workers_killed += finish_qps_workers(scenario.workers,
+                                                         qpsworker_jobs)
 
-  for scenario in scenarios:
-    if args.dry_run:
-      print(scenario.name)
-    else:
-      scenario_failures = 0
-      try:
-        for worker in scenario.workers:
-          worker.start()
-        jobs = [scenario.jobspec]
-        if scenario.workers:
-          jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
-        scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
-        total_scenario_failures += scenario_failures
-        merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
-                                                six.iteritems(resultset)))
-      finally:
-        # Consider qps workers that need to be killed as failures
-        qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
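The dict(itertools.chain(six.iteritems(...), ...)) idiom used above simply folds each scenario's resultset into the accumulated one in a way that behaves the same on Python 2 and 3. A tiny illustrative sketch with made-up result values:

    import itertools
    import six

    merged_resultset = {'scenario_a': ['PASSED']}   # accumulated so far (placeholder values)
    resultset = {'scenario_b': ['FAILED']}          # results from the scenario that just ran
    merged_resultset = dict(
        itertools.chain(six.iteritems(merged_resultset), six.iteritems(resultset)))
    # merged_resultset == {'scenario_a': ['PASSED'], 'scenario_b': ['FAILED']};
    # on a key collision the entry from the newer resultset wins.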
+            if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+                workers_and_base_names = {}
+                for worker in scenario.workers:
+                    if not worker.perf_file_base_name:
+                        raise Exception(
+                            'using perf but perf report filename is unspecified')
+                    workers_and_base_names[
+                        worker.host_and_port] = worker.perf_file_base_name
+                perf_report_failures += run_collect_perf_profile_jobs(
+                    workers_and_base_names, scenario.name,
+                    args.flame_graph_reports)
 
-      if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
-        workers_and_base_names = {}
-        for worker in scenario.workers:
-          if not worker.perf_file_base_name:
-            raise Exception('using perf buf perf report filename is unspecified')
-          workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
-        perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
+    # Still write the index.html even if some scenarios failed.
+    # 'profile_output_files' will only have names for scenarios that passed
+    if perf_cmd and not args.skip_generate_flamegraphs:
+        # write the index file to the output dir, with all profiles from all scenarios/workers
+        report_utils.render_perf_profiling_results(
+            '%s/index.html' % args.flame_graph_reports, profile_output_files)
 
+    report_utils.render_junit_xml_report(
+        merged_resultset, args.xml_report, suite_name='benchmarks')
 
-  # Still write the index.html even if some scenarios failed.
-  # 'profile_output_files' will only have names for scenarios that passed
-  if perf_cmd and not args.skip_generate_flamegraphs:
-    # write the index fil to the output dir, with all profiles from all scenarios/workers
-    report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
+    if total_scenario_failures > 0 or qps_workers_killed > 0:
+        print('%s scenarios failed and %s qps worker jobs killed' %
+              (total_scenario_failures, qps_workers_killed))
+        sys.exit(1)
 
-  report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
-                                       suite_name='benchmarks')
+    if perf_report_failures > 0:
+        print('%s perf profile collection jobs failed' % perf_report_failures)
+        sys.exit(1)
 
-  if total_scenario_failures > 0 or qps_workers_killed > 0:
-    print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
-    sys.exit(1)
-
-  if perf_report_failures > 0:
-    print('%s perf profile collection jobs failed' % perf_report_failures)
-    sys.exit(1)
 
 if __name__ == "__main__":
-  main()
+    main()
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 471f5d9..bd5b864 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run tests in parallel."""
 
 from __future__ import print_function
@@ -46,36 +45,34 @@
 import python_utils.watch_dirs as watch_dirs
 import python_utils.start_port_server as start_port_server
 try:
-  from python_utils.upload_test_results import upload_results_to_bq
+    from python_utils.upload_test_results import upload_results_to_bq
 except (ImportError):
-  pass # It's ok to not import because this is only necessary to upload results to BQ.
+    pass  # It's ok to not import because this is only necessary to upload results to BQ.
 
-gcp_utils_dir = os.path.abspath(os.path.join(
-        os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-
 _FORCE_ENVIRON_FOR_WRAPPERS = {
-  'GRPC_VERBOSITY': 'DEBUG',
+    'GRPC_VERBOSITY': 'DEBUG',
 }
 
 _POLLING_STRATEGIES = {
-  'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
-  'mac': ['poll'],
+    'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
+    'mac': ['poll'],
 }
 
-
 BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
 
 
 def get_bqtest_data(limit=None):
-  import big_query_utils
+    import big_query_utils
 
-  bq = big_query_utils.create_big_query()
-  query = """
+    bq = big_query_utils.create_big_query()
+    query = """
 SELECT
   filtered_test_name,
   SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
@@ -88,941 +85,1068 @@
     [grpc-testing:jenkins_test_results.aggregate_results]
   WHERE
     timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
-    AND platform = '"""+platform_string()+"""'
+    AND platform = '""" + platform_string() + """'
     AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
 GROUP BY
   filtered_test_name"""
-  if limit:
-    query += " limit {}".format(limit)
-  query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None,
-      **query_job['jobReference']).execute(num_retries=3)
-  test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
-  return test_data
+    if limit:
+        query += " limit {}".format(limit)
+    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    test_data = [
+        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
+                         float(row['f'][2]['v'])) for row in page['rows']
+    ]
+    return test_data
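The rows returned here are what ultimately populate the flaky_tests and shortname_to_cpu lookups consulted by Config.job_spec further down. That wiring happens elsewhere in this file (not in this hunk), so the sketch below is illustrative only and the test names are invented:

    import collections

    # Same namedtuple shape as defined earlier in this file.
    BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')

    bq_rows = [
        BigQueryTestData('h2_full_test some_test', True, 2.0),  # hypothetical flaky test
        BigQueryTestData('bm_fullstack bench', False, 0.5),     # hypothetical stable test
    ]
    flaky_tests = set(row.name for row in bq_rows if row.flaky)
    shortname_to_cpu = dict((row.name, row.cpu) for row in bq_rows)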
 
 
 def platform_string():
-  return jobset.platform_string()
+    return jobset.platform_string()
 
 
 _DEFAULT_TIMEOUT_SECONDS = 5 * 60
 
+
 def run_shell_command(cmd, env=None, cwd=None):
-  try:
-    subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
-  except subprocess.CalledProcessError as e:
-    logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
-                       e.cmd, e.returncode, e.output)
-    raise
+    try:
+        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
+    except subprocess.CalledProcessError as e:
+        logging.exception(
+            "Error while running command '%s'. Exit status %d. Output:\n%s",
+            e.cmd, e.returncode, e.output)
+        raise
+
 
 def max_parallel_tests_for_current_platform():
-  # Too much test parallelization has only been seen to be a problem
-  # so far on windows.
-  if jobset.platform_string() == 'windows':
-    return 64
-  return 1024
+    # Too much test parallelization has only been seen to be a problem
+    # so far on windows.
+    if jobset.platform_string() == 'windows':
+        return 64
+    return 1024
+
 
 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
 class Config(object):
 
-  def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
-    if environ is None:
-      environ = {}
-    self.build_config = config
-    self.environ = environ
-    self.environ['CONFIG'] = config
-    self.tool_prefix = tool_prefix
-    self.timeout_multiplier = timeout_multiplier
-    self.iomgr_platform = iomgr_platform
+    def __init__(self,
+                 config,
+                 environ=None,
+                 timeout_multiplier=1,
+                 tool_prefix=[],
+                 iomgr_platform='native'):
+        if environ is None:
+            environ = {}
+        self.build_config = config
+        self.environ = environ
+        self.environ['CONFIG'] = config
+        self.tool_prefix = tool_prefix
+        self.timeout_multiplier = timeout_multiplier
+        self.iomgr_platform = iomgr_platform
 
-  def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
-               shortname=None, environ={}, cpu_cost=1.0, flaky=False):
-    """Construct a jobset.JobSpec for a test under this config
+    def job_spec(self,
+                 cmdline,
+                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
+                 shortname=None,
+                 environ={},
+                 cpu_cost=1.0,
+                 flaky=False):
+        """Construct a jobset.JobSpec for a test under this config
 
        Args:
          cmdline:      a list of strings specifying the command line the test
                        would like to run
     """
-    actual_environ = self.environ.copy()
-    for k, v in environ.items():
-      actual_environ[k] = v
-    if not flaky and shortname and shortname in flaky_tests:
-      flaky = True
-    if shortname in shortname_to_cpu:
-      cpu_cost = shortname_to_cpu[shortname]
-    return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
-                          shortname=shortname,
-                          environ=actual_environ,
-                          cpu_cost=cpu_cost,
-                          timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
-                          flake_retries=4 if flaky or args.allow_flakes else 0,
-                          timeout_retries=1 if flaky or args.allow_flakes else 0)
+        actual_environ = self.environ.copy()
+        for k, v in environ.items():
+            actual_environ[k] = v
+        if not flaky and shortname and shortname in flaky_tests:
+            flaky = True
+        if shortname in shortname_to_cpu:
+            cpu_cost = shortname_to_cpu[shortname]
+        return jobset.JobSpec(
+            cmdline=self.tool_prefix + cmdline,
+            shortname=shortname,
+            environ=actual_environ,
+            cpu_cost=cpu_cost,
+            timeout_seconds=(self.timeout_multiplier * timeout_seconds
+                             if timeout_seconds else None),
+            flake_retries=4 if flaky or args.allow_flakes else 0,
+            timeout_retries=1 if flaky or args.allow_flakes else 0)
 
 
-def get_c_tests(travis, test_lang) :
-  out = []
-  platforms_str = 'ci_platforms' if travis else 'platforms'
-  with open('tools/run_tests/generated/tests.json') as f:
-    js = json.load(f)
-    return [tgt
-            for tgt in js
-            if tgt['language'] == test_lang and
-                platform_string() in tgt[platforms_str] and
-                not (travis and tgt['flaky'])]
+def get_c_tests(travis, test_lang):
+    out = []
+    platforms_str = 'ci_platforms' if travis else 'platforms'
+    with open('tools/run_tests/generated/tests.json') as f:
+        js = json.load(f)
+        return [
+            tgt for tgt in js
+            if tgt['language'] == test_lang and platform_string() in tgt[
+                platforms_str] and not (travis and tgt['flaky'])
+        ]
 
 
 def _check_compiler(compiler, supported_compilers):
-  if compiler not in supported_compilers:
-    raise Exception('Compiler %s not supported (on this platform).' % compiler)
+    if compiler not in supported_compilers:
+        raise Exception('Compiler %s not supported (on this platform).' %
+                        compiler)
 
 
 def _check_arch(arch, supported_archs):
-  if arch not in supported_archs:
-    raise Exception('Architecture %s not supported.' % arch)
+    if arch not in supported_archs:
+        raise Exception('Architecture %s not supported.' % arch)
 
 
 def _is_use_docker_child():
-  """Returns True if running running as a --use_docker child."""
-  return True if os.getenv('RUN_TESTS_COMMAND') else False
+    """Returns True if running running as a --use_docker child."""
+    return True if os.getenv('RUN_TESTS_COMMAND') else False
 
 
-_PythonConfigVars = collections.namedtuple(
-  '_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
-                  'venv_relative_python', 'toolchain', 'runner'])
+_PythonConfigVars = collections.namedtuple('_ConfigVars', [
+    'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
+    'toolchain', 'runner'
+])
 
 
 def _python_config_generator(name, major, minor, bits, config_vars):
-  return PythonConfig(
-    name,
-    config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
-      _python_pattern_function(major=major, minor=minor, bits=bits)] + [
-      name] + config_vars.venv_relative_python + config_vars.toolchain,
-    config_vars.shell + config_vars.runner + [
-      os.path.join(name, config_vars.venv_relative_python[0])])
+    return PythonConfig(
+        name, config_vars.shell + config_vars.builder +
+        config_vars.builder_prefix_arguments + [
+            _python_pattern_function(major=major, minor=minor, bits=bits)
+        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+        config_vars.shell + config_vars.runner +
+        [os.path.join(name, config_vars.venv_relative_python[0])])
 
 
 def _pypy_config_generator(name, major, config_vars):
-  return PythonConfig(
-    name,
-    config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
-      _pypy_pattern_function(major=major)] + [
-      name] + config_vars.venv_relative_python + config_vars.toolchain,
-    config_vars.shell + config_vars.runner + [
-      os.path.join(name, config_vars.venv_relative_python[0])])
+    return PythonConfig(
+        name,
+        config_vars.shell + config_vars.builder +
+        config_vars.builder_prefix_arguments + [
+            _pypy_pattern_function(major=major)
+        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+        config_vars.shell + config_vars.runner +
+        [os.path.join(name, config_vars.venv_relative_python[0])])
 
 
 def _python_pattern_function(major, minor, bits):
-  # Bit-ness is handled by the test machine's environment
-  if os.name == "nt":
-    if bits == "64":
-      return '/c/Python{major}{minor}/python.exe'.format(
-        major=major, minor=minor, bits=bits)
+    # Bit-ness is handled by the test machine's environment
+    if os.name == "nt":
+        if bits == "64":
+            return '/c/Python{major}{minor}/python.exe'.format(
+                major=major, minor=minor, bits=bits)
+        else:
+            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+                major=major, minor=minor, bits=bits)
     else:
-      return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
-        major=major, minor=minor, bits=bits)
-  else:
-    return 'python{major}.{minor}'.format(major=major, minor=minor)
+        return 'python{major}.{minor}'.format(major=major, minor=minor)
 
 
 def _pypy_pattern_function(major):
-  if major == '2':
-    return 'pypy'
-  elif major == '3':
-    return 'pypy3'
-  else:
-    raise ValueError("Unknown PyPy major version")
+    if major == '2':
+        return 'pypy'
+    elif major == '3':
+        return 'pypy3'
+    else:
+        raise ValueError("Unknown PyPy major version")
 
 
 class CLanguage(object):
 
-  def __init__(self, make_target, test_lang):
-    self.make_target = make_target
-    self.platform = platform_string()
-    self.test_lang = test_lang
+    def __init__(self, make_target, test_lang):
+        self.make_target = make_target
+        self.platform = platform_string()
+        self.test_lang = test_lang
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    if self.platform == 'windows':
-      _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
-                                           'cmake_vs2017'])
-      _check_arch(self.args.arch, ['default', 'x64', 'x86'])
-      self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
-      self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
-      self._use_cmake = True
-      self._make_options = []
-    elif self.args.compiler == 'cmake':
-      _check_arch(self.args.arch, ['default'])
-      self._use_cmake = True
-      self._docker_distro = 'jessie'
-      self._make_options = []
-    else:
-      self._use_cmake = False
-      self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
-                                                                       self.args.compiler)
-    if args.iomgr_platform == "uv":
-      cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
-      try:
-        cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
-      except (subprocess.CalledProcessError, OSError):
-        pass
-      try:
-        ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
-      except (subprocess.CalledProcessError, OSError):
-        ldflags = '-luv '
-      self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
-                             'EXTRA_LDLIBS={}'.format(ldflags)]
-
-  def test_specs(self):
-    out = []
-    binaries = get_c_tests(self.args.travis, self.test_lang)
-    for target in binaries:
-      if self._use_cmake and target.get('boringssl', False):
-        # cmake doesn't build boringssl tests
-        continue
-      auto_timeout_scaling = target.get('auto_timeout_scaling', True)
-      polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
-                            if target.get('uses_polling', True)
-                            else ['none'])
-      if self.args.iomgr_platform == 'uv':
-        polling_strategies = ['all']
-      for polling_strategy in polling_strategies:
-        env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
-                 _ROOT + '/src/core/tsi/test_creds/ca.pem',
-             'GRPC_POLL_STRATEGY': polling_strategy,
-             'GRPC_VERBOSITY': 'DEBUG'}
-        resolver = os.environ.get('GRPC_DNS_RESOLVER', None);
-        if resolver:
-          env['GRPC_DNS_RESOLVER'] = resolver
-        shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
-        if polling_strategy in target.get('excluded_poll_engines', []):
-          continue
-
-        timeout_scaling = 1
-        if auto_timeout_scaling:
-          config = self.args.config
-          if ('asan' in config
-              or config == 'msan'
-              or config == 'tsan'
-              or config == 'ubsan'
-              or config == 'helgrind'
-              or config == 'memcheck'):
-            # Scale overall test timeout if running under various sanitizers.
-            # scaling value is based on historical data analysis
-            timeout_scaling *= 3
-          elif polling_strategy == 'poll-cv':
-            # scale test timeout if running with poll-cv
-            # sanitizer and poll-cv scaling is not cumulative to ensure
-            # reasonable timeout values.
-            # TODO(jtattermusch): based on historical data and 5min default
-            # test timeout poll-cv scaling is currently not useful.
-            # Leaving here so it can be reintroduced if the default test timeout
-            # is decreased in the future.
-            timeout_scaling *= 1
-
-        if self.config.build_config in target['exclude_configs']:
-          continue
-        if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
-          continue
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
         if self.platform == 'windows':
-          binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
+            _check_compiler(self.args.compiler, [
+                'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
+            ])
+            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
+            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
+            self._use_cmake = True
+            self._make_options = []
+        elif self.args.compiler == 'cmake':
+            _check_arch(self.args.arch, ['default'])
+            self._use_cmake = True
+            self._docker_distro = 'jessie'
+            self._make_options = []
         else:
-          if self._use_cmake:
-            binary = 'cmake/build/%s' % target['name']
-          else:
-            binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
-        cpu_cost = target['cpu_cost']
-        if cpu_cost == 'capacity':
-          cpu_cost = multiprocessing.cpu_count()
-        if os.path.isfile(binary):
-          list_test_command = None
-          filter_test_command = None
+            self._use_cmake = False
+            self._docker_distro, self._make_options = self._compiler_options(
+                self.args.use_docker, self.args.compiler)
+        if args.iomgr_platform == "uv":
+            cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
+            try:
+                cflags += subprocess.check_output(
+                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
+            except (subprocess.CalledProcessError, OSError):
+                pass
+            try:
+                ldflags = subprocess.check_output(
+                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
+            except (subprocess.CalledProcessError, OSError):
+                ldflags = '-luv '
+            self._make_options += [
+                'EXTRA_CPPFLAGS={}'.format(cflags),
+                'EXTRA_LDLIBS={}'.format(ldflags)
+            ]
 
-          # these are the flag defined by gtest and benchmark framework to list
-          # and filter test runs. We use them to split each individual test
-          # into its own JobSpec, and thus into its own process.
-          if 'benchmark' in target and target['benchmark']:
-            with open(os.devnull, 'w') as fnull:
-              tests = subprocess.check_output([binary, '--benchmark_list_tests'],
-                                              stderr=fnull)
-            for line in tests.split('\n'):
-              test = line.strip()
-              if not test: continue
-              cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
-              out.append(self.config.job_spec(cmdline,
-                                              shortname='%s %s' % (' '.join(cmdline), shortname_ext),
-                                              cpu_cost=cpu_cost,
-                                              timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
-                                              environ=env))
-          elif 'gtest' in target and target['gtest']:
-            # here we parse the output of --gtest_list_tests to build up a complete
-            # list of the tests contained in a binary for each test, we then
-            # add a job to run, filtering for just that test.
-            with open(os.devnull, 'w') as fnull:
-              tests = subprocess.check_output([binary, '--gtest_list_tests'],
-                                              stderr=fnull)
-            base = None
-            for line in tests.split('\n'):
-              i = line.find('#')
-              if i >= 0: line = line[:i]
-              if not line: continue
-              if line[0] != ' ':
-                base = line.strip()
-              else:
-                assert base is not None
-                assert line[1] == ' '
-                test = base + line.strip()
-                cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
-                out.append(self.config.job_spec(cmdline,
-                                                shortname='%s %s' % (' '.join(cmdline), shortname_ext),
-                                                cpu_cost=cpu_cost,
-                                                timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
-                                                environ=env))
-          else:
-            cmdline = [binary] + target['args']
-            shortname = target.get('shortname', ' '.join(
-                          pipes.quote(arg)
-                          for arg in cmdline))
-            shortname += shortname_ext
-            out.append(self.config.job_spec(cmdline,
-                                            shortname=shortname,
-                                            cpu_cost=cpu_cost,
-                                            flaky=target.get('flaky', False),
-                                            timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
-                                            environ=env))
-        elif self.args.regex == '.*' or self.platform == 'windows':
-          print('\nWARNING: binary not found, skipping', binary)
-    return sorted(out)
+    def test_specs(self):
+        out = []
+        binaries = get_c_tests(self.args.travis, self.test_lang)
+        for target in binaries:
+            if self._use_cmake and target.get('boringssl', False):
+                # cmake doesn't build boringssl tests
+                continue
+            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
+            polling_strategies = (
+                _POLLING_STRATEGIES.get(self.platform, ['all'])
+                if target.get('uses_polling', True) else ['none'])
+            if self.args.iomgr_platform == 'uv':
+                polling_strategies = ['all']
+            for polling_strategy in polling_strategies:
+                env = {
+                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
+                    'GRPC_POLL_STRATEGY':
+                    polling_strategy,
+                    'GRPC_VERBOSITY':
+                    'DEBUG'
+                }
+                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
+                if resolver:
+                    env['GRPC_DNS_RESOLVER'] = resolver
+                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+                if polling_strategy in target.get('excluded_poll_engines', []):
+                    continue
 
-  def make_targets(self):
-    if self.platform == 'windows':
-      # don't build tools on windows just yet
-      return ['buildtests_%s' % self.make_target]
-    return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
-            'check_epollexclusive']
+                timeout_scaling = 1
+                if auto_timeout_scaling:
+                    config = self.args.config
+                    if ('asan' in config or config == 'msan' or
+                            config == 'tsan' or config == 'ubsan' or
+                            config == 'helgrind' or config == 'memcheck'):
+                        # Scale overall test timeout if running under various sanitizers.
+                        # scaling value is based on historical data analysis
+                        timeout_scaling *= 3
+                    elif polling_strategy == 'poll-cv':
+                        # scale test timeout if running with poll-cv
+                        # sanitizer and poll-cv scaling is not cumulative to ensure
+                        # reasonable timeout values.
+                        # TODO(jtattermusch): based on historical data and 5min default
+                        # test timeout poll-cv scaling is currently not useful.
+                        # Leaving here so it can be reintroduced if the default test timeout
+                        # is decreased in the future.
+                        timeout_scaling *= 1
 
-  def make_options(self):
-    return self._make_options
+                if self.config.build_config in target['exclude_configs']:
+                    continue
+                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
+                    continue
+                if self.platform == 'windows':
+                    binary = 'cmake/build/%s/%s.exe' % (
+                        _MSBUILD_CONFIG[self.config.build_config],
+                        target['name'])
+                else:
+                    if self._use_cmake:
+                        binary = 'cmake/build/%s' % target['name']
+                    else:
+                        binary = 'bins/%s/%s' % (self.config.build_config,
+                                                 target['name'])
+                cpu_cost = target['cpu_cost']
+                if cpu_cost == 'capacity':
+                    cpu_cost = multiprocessing.cpu_count()
+                if os.path.isfile(binary):
+                    list_test_command = None
+                    filter_test_command = None
 
-  def pre_build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
-               self._cmake_generator_option,
-               self._cmake_arch_option]]
-    elif self._use_cmake:
-      return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
-    else:
-      return []
+                    # these are the flags defined by the gtest and benchmark frameworks to list
+                    # and filter test runs. We use them to split each individual test
+                    # into its own JobSpec, and thus into its own process.
+                    if 'benchmark' in target and target['benchmark']:
+                        with open(os.devnull, 'w') as fnull:
+                            tests = subprocess.check_output(
+                                [binary, '--benchmark_list_tests'],
+                                stderr=fnull)
+                        for line in tests.split('\n'):
+                            test = line.strip()
+                            if not test: continue
+                            cmdline = [binary, '--benchmark_filter=%s$' % test
+                                      ] + target['args']
+                            out.append(
+                                self.config.job_spec(
+                                    cmdline,
+                                    shortname='%s %s' % (' '.join(cmdline),
+                                                         shortname_ext),
+                                    cpu_cost=cpu_cost,
+                                    timeout_seconds=_DEFAULT_TIMEOUT_SECONDS *
+                                    timeout_scaling,
+                                    environ=env))
+                    elif 'gtest' in target and target['gtest']:
+                        # here we parse the output of --gtest_list_tests to build up a complete
+                        # list of the tests contained in a binary; for each test, we then
+                        # add a job to run, filtering for just that test.
+                        with open(os.devnull, 'w') as fnull:
+                            tests = subprocess.check_output(
+                                [binary, '--gtest_list_tests'], stderr=fnull)
+                        base = None
+                        for line in tests.split('\n'):
+                            i = line.find('#')
+                            if i >= 0: line = line[:i]
+                            if not line: continue
+                            if line[0] != ' ':
+                                base = line.strip()
+                            else:
+                                assert base is not None
+                                assert line[1] == ' '
+                                test = base + line.strip()
+                                cmdline = [binary, '--gtest_filter=%s' % test
+                                          ] + target['args']
+                                out.append(
+                                    self.config.job_spec(
+                                        cmdline,
+                                        shortname='%s %s' % (' '.join(cmdline),
+                                                             shortname_ext),
+                                        cpu_cost=cpu_cost,
+                                        timeout_seconds=target.get(
+                                            'timeout_seconds',
+                                            _DEFAULT_TIMEOUT_SECONDS) *
+                                        timeout_scaling,
+                                        environ=env))
+                    else:
+                        cmdline = [binary] + target['args']
+                        shortname = target.get('shortname', ' '.join(
+                            pipes.quote(arg) for arg in cmdline))
+                        shortname += shortname_ext
+                        out.append(
+                            self.config.job_spec(
+                                cmdline,
+                                shortname=shortname,
+                                cpu_cost=cpu_cost,
+                                flaky=target.get('flaky', False),
+                                timeout_seconds=target.get(
+                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
+                                * timeout_scaling,
+                                environ=env))
+                elif self.args.regex == '.*' or self.platform == 'windows':
+                    print('\nWARNING: binary not found, skipping', binary)
+        return sorted(out)
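The --gtest_list_tests handling above reconstructs fully qualified test names from gtest's indented listing before each one becomes its own JobSpec. A standalone sketch of the same parsing on sample output (the suite and test names are invented):

    # Sample of what `<binary> --gtest_list_tests` prints; names are hypothetical.
    listing = """ChannelTest.
      CreateChannel
      DestroyChannel  # GetParam() = ...
    ServerTest.
      StartServer
    """

    tests = []
    base = None
    for line in listing.split('\n'):
        i = line.find('#')
        if i >= 0:
            line = line[:i]      # drop gtest's trailing comment
        if not line:
            continue
        if line[0] != ' ':
            base = line.strip()  # suite name, already ends with '.'
        else:
            tests.append(base + line.strip())

    print(tests)
    # ['ChannelTest.CreateChannel', 'ChannelTest.DestroyChannel', 'ServerTest.StartServer']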
 
-  def build_steps(self):
-    return []
+    def make_targets(self):
+        if self.platform == 'windows':
+            # don't build tools on windows just yet
+            return ['buildtests_%s' % self.make_target]
+        return [
+            'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
+            'check_epollexclusive'
+        ]
 
-  def post_tests_steps(self):
-    if self.platform == 'windows':
-      return []
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+    def make_options(self):
+        return self._make_options
 
-  def makefile_name(self):
-    if self._use_cmake:
-      return 'cmake/build/Makefile'
-    else:
-      return 'Makefile'
+    def pre_build_steps(self):
+        if self.platform == 'windows':
+            return [[
+                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+                self._cmake_generator_option, self._cmake_arch_option
+            ]]
+        elif self._use_cmake:
+            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
+        else:
+            return []
 
-  def _clang_make_options(self, version_suffix=''):
-    return ['CC=clang%s' % version_suffix,
-            'CXX=clang++%s' % version_suffix,
-            'LD=clang%s' % version_suffix,
-            'LDXX=clang++%s' % version_suffix]
+    def build_steps(self):
+        return []
 
-  def _gcc_make_options(self, version_suffix):
-    return ['CC=gcc%s' % version_suffix,
-            'CXX=g++%s' % version_suffix,
-            'LD=gcc%s' % version_suffix,
-            'LDXX=g++%s' % version_suffix]
+    def post_tests_steps(self):
+        if self.platform == 'windows':
+            return []
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
 
-  def _compiler_options(self, use_docker, compiler):
-    """Returns docker distro and make options to use for given compiler."""
-    if not use_docker and not _is_use_docker_child():
-      _check_compiler(compiler, ['default'])
+    def makefile_name(self):
+        if self._use_cmake:
+            return 'cmake/build/Makefile'
+        else:
+            return 'Makefile'
 
-    if compiler == 'gcc4.9' or compiler == 'default':
-      return ('jessie', [])
-    elif compiler == 'gcc4.8':
-      return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
-    elif compiler == 'gcc5.3':
-      return ('ubuntu1604', [])
-    elif compiler == 'gcc_musl':
-      return ('alpine', [])
-    elif compiler == 'clang3.4':
-      # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
-      return ('ubuntu1404', self._clang_make_options())
-    elif compiler == 'clang3.5':
-      return ('jessie', self._clang_make_options(version_suffix='-3.5'))
-    elif compiler == 'clang3.6':
-      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
-    elif compiler == 'clang3.7':
-      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
-    else:
-      raise Exception('Compiler %s not supported.' % compiler)
+    def _clang_make_options(self, version_suffix=''):
+        return [
+            'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
+            'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
+        ]
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
-                                                _docker_arch_suffix(self.args.arch))
+    def _gcc_make_options(self, version_suffix):
+        return [
+            'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
+            'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
+        ]
 
-  def __str__(self):
-    return self.make_target
+    def _compiler_options(self, use_docker, compiler):
+        """Returns docker distro and make options to use for given compiler."""
+        if not use_docker and not _is_use_docker_child():
+            _check_compiler(compiler, ['default'])
+
+        if compiler == 'gcc4.9' or compiler == 'default':
+            return ('jessie', [])
+        elif compiler == 'gcc4.8':
+            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
+        elif compiler == 'gcc5.3':
+            return ('ubuntu1604', [])
+        elif compiler == 'gcc_musl':
+            return ('alpine', [])
+        elif compiler == 'clang3.4':
+            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
+            return ('ubuntu1404', self._clang_make_options())
+        elif compiler == 'clang3.5':
+            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
+        elif compiler == 'clang3.6':
+            return ('ubuntu1604',
+                    self._clang_make_options(version_suffix='-3.6'))
+        elif compiler == 'clang3.7':
+            return ('ubuntu1604',
+                    self._clang_make_options(version_suffix='-3.7'))
+        else:
+            raise Exception('Compiler %s not supported.' % compiler)
+
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/cxx_%s_%s' % (
+            self._docker_distro, _docker_arch_suffix(self.args.arch))
+
+    def __str__(self):
+        return self.make_target
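A couple of concrete values from the _compiler_options mapping above; these checks hold when run inside this module, and use_docker=True skips the local-compiler restriction:

    lang = CLanguage('c', 'c')
    assert lang._compiler_options(True, 'gcc4.8') == (
        'jessie', ['CC=gcc-4.8', 'CXX=g++-4.8', 'LD=gcc-4.8', 'LDXX=g++-4.8'])
    assert lang._compiler_options(True, 'clang3.6') == (
        'ubuntu1604',
        ['CC=clang-3.6', 'CXX=clang++-3.6', 'LD=clang-3.6', 'LDXX=clang++-3.6'])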
 
 
 # This tests Node on grpc/grpc-node and will become the standard for Node testing
 class RemoteNodeLanguage(object):
 
-  def __init__(self):
-    self.platform = platform_string()
+    def __init__(self):
+        self.platform = platform_string()
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    # Note: electron ABI only depends on major and minor version, so that's all
-    # we should specify in the compiler argument
-    _check_compiler(self.args.compiler, ['default', 'node0.12',
-                                         'node4', 'node5', 'node6',
-                                         'node7', 'node8',
-                                         'electron1.3', 'electron1.6'])
-    if self.args.compiler == 'default':
-      self.runtime = 'node'
-      self.node_version = '8'
-    else:
-      if self.args.compiler.startswith('electron'):
-        self.runtime = 'electron'
-        self.node_version = self.args.compiler[8:]
-      else:
-        self.runtime = 'node'
-        # Take off the word "node"
-        self.node_version = self.args.compiler[4:]
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        # Note: electron ABI only depends on major and minor version, so that's all
+        # we should specify in the compiler argument
+        _check_compiler(self.args.compiler, [
+            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
+            'electron1.3', 'electron1.6'
+        ])
+        if self.args.compiler == 'default':
+            self.runtime = 'node'
+            self.node_version = '8'
+        else:
+            if self.args.compiler.startswith('electron'):
+                self.runtime = 'electron'
+                self.node_version = self.args.compiler[8:]
+            else:
+                self.runtime = 'node'
+                # Take off the word "node"
+                self.node_version = self.args.compiler[4:]
 
-  # TODO: update with Windows/electron scripts when available for grpc/grpc-node
-  def test_specs(self):
-    if self.platform == 'windows':
-      return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
-    else:
-      return [self.config.job_spec(['tools/run_tests/helper_scripts/run_grpc-node.sh'],
-                                   None,
-                                   environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
+    def test_specs(self):
+        if self.platform == 'windows':
+            return [
+                self.config.job_spec(
+                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
+            ]
+        else:
+            return [
+                self.config.job_spec(
+                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
+                    None,
+                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+            ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return []
+    def build_steps(self):
+        return []
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'grpc-node'
+    def __str__(self):
+        return 'grpc-node'
 
 
 class PhpLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
-    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
+        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    return [self.config.job_spec(['src/php/bin/run_tests.sh'],
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/php/bin/run_tests.sh'],
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['static_c', 'shared_c']
+    def make_targets(self):
+        return ['static_c', 'shared_c']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_php.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_php.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'php'
+    def __str__(self):
+        return 'php'
 
 
 class Php7Language(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
-    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
+        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    return [self.config.job_spec(['src/php/bin/run_tests.sh'],
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/php/bin/run_tests.sh'],
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['static_c', 'shared_c']
+    def make_targets(self):
+        return ['static_c', 'shared_c']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_php.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_php.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'php7'
+    def __str__(self):
+        return 'php7'
 
 
-class PythonConfig(collections.namedtuple('PythonConfig', [
-    'name', 'build', 'run'])):
-  """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
+class PythonConfig(
+        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
+    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
+
 
 class PythonLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    self.pythons = self._get_pythons(self.args)
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        self.pythons = self._get_pythons(self.args)
 
-  def test_specs(self):
-    # load list of known test suites
-    with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
-      tests_json = json.load(tests_json_file)
-    environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
-    return [self.config.job_spec(
-        config.run,
-        timeout_seconds=5*60,
-        environ=dict(list(environment.items()) +
-                     [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
-        shortname='%s.test.%s' % (config.name, suite_name),)
-        for suite_name in tests_json
-        for config in self.pythons]
+    def test_specs(self):
+        # load list of known test suites
+        with open(
+                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
+            tests_json = json.load(tests_json_file)
+        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
+        return [
+            self.config.job_spec(
+                config.run,
+                timeout_seconds=5 * 60,
+                environ=dict(
+                    list(environment.items()) + [(
+                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
+                shortname='%s.test.%s' % (config.name, suite_name),)
+            for suite_name in tests_json for config in self.pythons
+        ]
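In effect this builds one JobSpec per (python config, test suite) pair, pointing GRPC_PYTHON_TESTRUNNER_FILTER at the suite; a rough illustration with invented names:

    # Illustrative only: two suites x two configs -> four test jobs.
    for suite_name in ['unit._api_test', 'unit._channel_test']:  # hypothetical suite names
        for config_name in ['py27', 'py36']:                     # hypothetical config names
            print('%s.test.%s' % (config_name, suite_name),
                  'GRPC_PYTHON_TESTRUNNER_FILTER=%s' % suite_name)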
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [config.build for config in self.pythons]
+    def build_steps(self):
+        return [config.build for config in self.pythons]
 
-  def post_tests_steps(self):
-    if self.config.build_config != 'gcov':
-      return []
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
+    def post_tests_steps(self):
+        if self.config.build_config != 'gcov':
+            return []
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/python_%s_%s' % (
+            self.python_manager_name(), _docker_arch_suffix(self.args.arch))
 
-  def python_manager_name(self):
-    if self.args.compiler in ['python3.5', 'python3.6']:
-      return 'pyenv'
-    elif self.args.compiler == 'python_alpine':
-      return 'alpine'
-    else:
-      return 'jessie'
+    def python_manager_name(self):
+        if self.args.compiler in ['python3.5', 'python3.6']:
+            return 'pyenv'
+        elif self.args.compiler == 'python_alpine':
+            return 'alpine'
+        else:
+            return 'jessie'
 
-  def _get_pythons(self, args):
-    if args.arch == 'x86':
-      bits = '32'
-    else:
-      bits = '64'
+    def _get_pythons(self, args):
+        if args.arch == 'x86':
+            bits = '32'
+        else:
+            bits = '64'
 
-    if os.name == 'nt':
-      shell = ['bash']
-      builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
-      builder_prefix_arguments = ['MINGW{}'.format(bits)]
-      venv_relative_python = ['Scripts/python.exe']
-      toolchain = ['mingw32']
-    else:
-      shell = []
-      builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
-      builder_prefix_arguments = []
-      venv_relative_python = ['bin/python']
-      toolchain = ['unix']
+        if os.name == 'nt':
+            shell = ['bash']
+            builder = [
+                os.path.abspath(
+                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
+            ]
+            builder_prefix_arguments = ['MINGW{}'.format(bits)]
+            venv_relative_python = ['Scripts/python.exe']
+            toolchain = ['mingw32']
+        else:
+            shell = []
+            builder = [
+                os.path.abspath(
+                    'tools/run_tests/helper_scripts/build_python.sh')
+            ]
+            builder_prefix_arguments = []
+            venv_relative_python = ['bin/python']
+            toolchain = ['unix']
 
-    runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
-    config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
-                              venv_relative_python, toolchain, runner)
-    python27_config = _python_config_generator(name='py27', major='2',
-                                               minor='7', bits=bits,
-                                               config_vars=config_vars)
-    python34_config = _python_config_generator(name='py34', major='3',
-                                               minor='4', bits=bits,
-                                               config_vars=config_vars)
-    python35_config = _python_config_generator(name='py35', major='3',
-                                               minor='5', bits=bits,
-                                               config_vars=config_vars)
-    python36_config = _python_config_generator(name='py36', major='3',
-                                               minor='6', bits=bits,
-                                               config_vars=config_vars)
-    pypy27_config = _pypy_config_generator(name='pypy', major='2',
-                                           config_vars=config_vars)
-    pypy32_config = _pypy_config_generator(name='pypy3', major='3',
-                                           config_vars=config_vars)
+        runner = [
+            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
+        ]
+        config_vars = _PythonConfigVars(shell, builder,
+                                        builder_prefix_arguments,
+                                        venv_relative_python, toolchain, runner)
+        python27_config = _python_config_generator(
+            name='py27',
+            major='2',
+            minor='7',
+            bits=bits,
+            config_vars=config_vars)
+        python34_config = _python_config_generator(
+            name='py34',
+            major='3',
+            minor='4',
+            bits=bits,
+            config_vars=config_vars)
+        python35_config = _python_config_generator(
+            name='py35',
+            major='3',
+            minor='5',
+            bits=bits,
+            config_vars=config_vars)
+        python36_config = _python_config_generator(
+            name='py36',
+            major='3',
+            minor='6',
+            bits=bits,
+            config_vars=config_vars)
+        pypy27_config = _pypy_config_generator(
+            name='pypy', major='2', config_vars=config_vars)
+        pypy32_config = _pypy_config_generator(
+            name='pypy3', major='3', config_vars=config_vars)
 
-    if args.compiler == 'default':
-      if os.name == 'nt':
-        return (python35_config,)
-      else:
-        return (python27_config, python34_config,)
-    elif args.compiler == 'python2.7':
-      return (python27_config,)
-    elif args.compiler == 'python3.4':
-      return (python34_config,)
-    elif args.compiler == 'python3.5':
-      return (python35_config,)
-    elif args.compiler == 'python3.6':
-      return (python36_config,)
-    elif args.compiler == 'pypy':
-      return (pypy27_config,)
-    elif args.compiler == 'pypy3':
-      return (pypy32_config,)
-    elif args.compiler == 'python_alpine':
-      return (python27_config,)
-    elif args.compiler == 'all_the_cpythons':
-      return (python27_config, python34_config, python35_config,
-              python36_config,)
-    else:
-      raise Exception('Compiler %s not supported.' % args.compiler)
+        if args.compiler == 'default':
+            if os.name == 'nt':
+                return (python35_config,)
+            else:
+                return (python27_config, python34_config,)
+        elif args.compiler == 'python2.7':
+            return (python27_config,)
+        elif args.compiler == 'python3.4':
+            return (python34_config,)
+        elif args.compiler == 'python3.5':
+            return (python35_config,)
+        elif args.compiler == 'python3.6':
+            return (python36_config,)
+        elif args.compiler == 'pypy':
+            return (pypy27_config,)
+        elif args.compiler == 'pypy3':
+            return (pypy32_config,)
+        elif args.compiler == 'python_alpine':
+            return (python27_config,)
+        elif args.compiler == 'all_the_cpythons':
+            return (python27_config, python34_config, python35_config,
+                    python36_config,)
+        else:
+            raise Exception('Compiler %s not supported.' % args.compiler)
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
 
 
 class RubyLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
-                                  timeout_seconds=10*60,
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
-    tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
-                 timeout_seconds=10*60,
-                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-    return tests
+    def test_specs(self):
+        tests = [
+            self.config.job_spec(
+                ['tools/run_tests/helper_scripts/run_ruby.sh'],
+                timeout_seconds=10 * 60,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
+        tests.append(
+            self.config.job_spec(
+                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
+                timeout_seconds=10 * 60,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+        return tests
 
-  def pre_build_steps(self):
-    return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
+    def pre_build_steps(self):
+        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_ruby.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_ruby.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
 
 
 class CSharpLanguage(object):
 
-  def __init__(self):
-    self.platform = platform_string()
+    def __init__(self):
+        self.platform = platform_string()
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    if self.platform == 'windows':
-      _check_compiler(self.args.compiler, ['coreclr', 'default'])
-      _check_arch(self.args.arch, ['default'])
-      self._cmake_arch_option = 'x64'
-      self._make_options = []
-    else:
-      _check_compiler(self.args.compiler, ['default', 'coreclr'])
-      self._docker_distro = 'jessie'
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        if self.platform == 'windows':
+            _check_compiler(self.args.compiler, ['coreclr', 'default'])
+            _check_arch(self.args.arch, ['default'])
+            self._cmake_arch_option = 'x64'
+            self._make_options = []
+        else:
+            _check_compiler(self.args.compiler, ['default', 'coreclr'])
+            self._docker_distro = 'jessie'
 
-      if self.platform == 'mac':
-        # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
-        self._make_options = ['EMBED_OPENSSL=true']
-        if self.args.compiler != 'coreclr':
-          # On Mac, official distribution of mono is 32bit.
-          self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
-      else:
-        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+            if self.platform == 'mac':
+                # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
+                self._make_options = ['EMBED_OPENSSL=true']
+                if self.args.compiler != 'coreclr':
+                    # On Mac, official distribution of mono is 32bit.
+                    self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
+            else:
+                self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    with open('src/csharp/tests.json') as f:
-      tests_by_assembly = json.load(f)
+    def test_specs(self):
+        with open('src/csharp/tests.json') as f:
+            tests_by_assembly = json.load(f)
 
-    msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
-    nunit_args = ['--labels=All', '--noresult', '--workers=1']
-    assembly_subdir = 'bin/%s' % msbuild_config
-    assembly_extension = '.exe'
+        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
+        nunit_args = ['--labels=All', '--noresult', '--workers=1']
+        assembly_subdir = 'bin/%s' % msbuild_config
+        assembly_extension = '.exe'
 
-    if self.args.compiler == 'coreclr':
-      assembly_subdir += '/netcoreapp1.0'
-      runtime_cmd = ['dotnet', 'exec']
-      assembly_extension = '.dll'
-    else:
-      assembly_subdir += '/net45'
-      if self.platform == 'windows':
-        runtime_cmd = []
-      else:
-        runtime_cmd = ['mono']
+        if self.args.compiler == 'coreclr':
+            assembly_subdir += '/netcoreapp1.0'
+            runtime_cmd = ['dotnet', 'exec']
+            assembly_extension = '.dll'
+        else:
+            assembly_subdir += '/net45'
+            if self.platform == 'windows':
+                runtime_cmd = []
+            else:
+                runtime_cmd = ['mono']
 
-    specs = []
-    for assembly in six.iterkeys(tests_by_assembly):
-      assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
-                                                 assembly_subdir,
-                                                 assembly,
-                                                 assembly_extension)
-      if self.config.build_config != 'gcov' or self.platform != 'windows':
-        # normally, run each test as a separate process
-        for test in tests_by_assembly[assembly]:
-          cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
-          specs.append(self.config.job_spec(cmdline,
-                                            shortname='csharp.%s' % test,
-                                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-      else:
-        # For C# test coverage, run all tests from the same assembly at once
-        # using OpenCover.Console (only works on Windows).
-        cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
-                   '-target:%s' % assembly_file,
-                   '-targetdir:src\\csharp',
-                   '-targetargs:%s' % ' '.join(nunit_args),
-                   '-filter:+[Grpc.Core]*',
-                   '-register:user',
-                   '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
+        specs = []
+        for assembly in six.iterkeys(tests_by_assembly):
+            assembly_file = 'src/csharp/%s/%s/%s%s' % (
+                assembly, assembly_subdir, assembly, assembly_extension)
+            if self.config.build_config != 'gcov' or self.platform != 'windows':
+                # normally, run each test as a separate process
+                for test in tests_by_assembly[assembly]:
+                    cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
+                                            ] + nunit_args
+                    specs.append(
+                        self.config.job_spec(
+                            cmdline,
+                            shortname='csharp.%s' % test,
+                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+            else:
+                # For C# test coverage, run all tests from the same assembly at once
+                # using OpenCover.Console (only works on Windows).
+                cmdline = [
+                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
+                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
+                    '-targetargs:%s' % ' '.join(nunit_args),
+                    '-filter:+[Grpc.Core]*', '-register:user',
+                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
+                ]
 
-        # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
-        # to prevent problems with registering the profiler.
-        run_exclusive = 1000000
-        specs.append(self.config.job_spec(cmdline,
-                                          shortname='csharp.coverage.%s' % assembly,
-                                          cpu_cost=run_exclusive,
-                                          environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-    return specs
+                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
+                # to prevent problems with registering the profiler.
+                run_exclusive = 1000000
+                specs.append(
+                    self.config.job_spec(
+                        cmdline,
+                        shortname='csharp.coverage.%s' % assembly,
+                        cpu_cost=run_exclusive,
+                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+        return specs
 
-  def pre_build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
-    else:
-      return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
+    def pre_build_steps(self):
+        if self.platform == 'windows':
+            return [[
+                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
+                self._cmake_arch_option
+            ]]
+        else:
+            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
 
-  def make_targets(self):
-    return ['grpc_csharp_ext']
+    def make_targets(self):
+        return ['grpc_csharp_ext']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
-    else:
-      return [['tools/run_tests/helper_scripts/build_csharp.sh']]
+    def build_steps(self):
+        if self.platform == 'windows':
+            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
+        else:
+            return [['tools/run_tests/helper_scripts/build_csharp.sh']]
 
-  def post_tests_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
+    def post_tests_steps(self):
+        if self.platform == 'windows':
+            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
 
-  def makefile_name(self):
-    if self.platform == 'windows':
-      return 'cmake/build/%s/Makefile' % self._cmake_arch_option
-    else:
-      return 'Makefile'
+    def makefile_name(self):
+        if self.platform == 'windows':
+            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
+        else:
+            return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
-                                                   _docker_arch_suffix(self.args.arch))
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/csharp_%s_%s' % (
+            self._docker_distro, _docker_arch_suffix(self.args.arch))
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
 
 
 class ObjCLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    return [
-        self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
-                              timeout_seconds=60*60,
-                              shortname='objc-tests',
-                              cpu_cost=1e6,
-                              environ=_FORCE_ENVIRON_FOR_WRAPPERS),
-        self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
-                              timeout_seconds=60*60,
-                              shortname='objc-plugin-tests',
-                              cpu_cost=1e6,
-                              environ=_FORCE_ENVIRON_FOR_WRAPPERS),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-helloworld',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'HelloWorld',
-                                       'EXAMPLE_PATH': 'examples/objective-c/helloworld'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-routeguide',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'RouteGuideClient',
-                                       'EXAMPLE_PATH': 'examples/objective-c/route_guide'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-authsample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'AuthSample',
-                                       'EXAMPLE_PATH': 'examples/objective-c/auth_sample'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-sample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'Sample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/Sample'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-sample-frameworks',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'Sample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
-                                       'FRAMEWORKS': 'YES'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-switftsample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'SwiftSample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'}),
-    ]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/objective-c/tests/run_tests.sh'],
+                timeout_seconds=60 * 60,
+                shortname='objc-tests',
+                cpu_cost=1e6,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+            self.config.job_spec(
+                ['src/objective-c/tests/run_plugin_tests.sh'],
+                timeout_seconds=60 * 60,
+                shortname='objc-plugin-tests',
+                cpu_cost=1e6,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-helloworld',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'HelloWorld',
+                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-routeguide',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'RouteGuideClient',
+                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-authsample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'AuthSample',
+                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-sample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'Sample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-sample-frameworks',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'Sample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+                    'FRAMEWORKS': 'YES'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-swiftsample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'SwiftSample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
+                }),
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['interop_server']
+    def make_targets(self):
+        return ['interop_server']
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [['src/objective-c/tests/build_tests.sh']]
+    def build_steps(self):
+        return [['src/objective-c/tests/build_tests.sh']]
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return None
+    def dockerfile_dir(self):
+        return None
 
-  def __str__(self):
-    return 'objc'
+    def __str__(self):
+        return 'objc'
 
 
 class Sanity(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    import yaml
-    with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
-      environ={'TEST': 'true'}
-      if _is_use_docker_child():
-        environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
-      return [self.config.job_spec(cmd['script'].split(),
-                                   timeout_seconds=30*60,
-                                   environ=environ,
-                                   cpu_cost=cmd.get('cpu_cost', 1))
-              for cmd in yaml.load(f)]
+    def test_specs(self):
+        import yaml
+        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+            environ = {'TEST': 'true'}
+            if _is_use_docker_child():
+                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
+            return [
+                self.config.job_spec(
+                    cmd['script'].split(),
+                    timeout_seconds=30 * 60,
+                    environ=environ,
+                    cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
+            ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['run_dep_checks']
+    def make_targets(self):
+        return ['run_dep_checks']
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return []
+    def build_steps(self):
+        return []
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/sanity'
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/sanity'
 
-  def __str__(self):
-    return 'sanity'
+    def __str__(self):
+        return 'sanity'
+
 
 # different configurations we can run under
 with open('tools/run_tests/generated/configs.json') as f:
-  _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
-
+    _CONFIGS = dict((cfg['config'], Config(**cfg))
+                    for cfg in ast.literal_eval(f.read()))
 
 _LANGUAGES = {
     'c++': CLanguage('cxx', 'c++'),
@@ -1033,60 +1157,61 @@
     'python': PythonLanguage(),
     'ruby': RubyLanguage(),
     'csharp': CSharpLanguage(),
-    'objc' : ObjCLanguage(),
+    'objc': ObjCLanguage(),
     'sanity': Sanity()
-    }
-
+}
 
 _MSBUILD_CONFIG = {
     'dbg': 'Debug',
     'opt': 'Release',
     'gcov': 'Debug',
-    }
+}
 
 
 def _windows_arch_option(arch):
-  """Returns msbuild cmdline option for selected architecture."""
-  if arch == 'default' or arch == 'x86':
-    return '/p:Platform=Win32'
-  elif arch == 'x64':
-    return '/p:Platform=x64'
-  else:
-    print('Architecture %s not supported.' % arch)
-    sys.exit(1)
+    """Returns msbuild cmdline option for selected architecture."""
+    if arch == 'default' or arch == 'x86':
+        return '/p:Platform=Win32'
+    elif arch == 'x64':
+        return '/p:Platform=x64'
+    else:
+        print('Architecture %s not supported.' % arch)
+        sys.exit(1)
 
 
 def _check_arch_option(arch):
-  """Checks that architecture option is valid."""
-  if platform_string() == 'windows':
-    _windows_arch_option(arch)
-  elif platform_string() == 'linux':
-    # On linux, we need to be running under docker with the right architecture.
-    runtime_arch = platform.architecture()[0]
-    if arch == 'default':
-      return
-    elif runtime_arch == '64bit' and arch == 'x64':
-      return
-    elif runtime_arch == '32bit' and arch == 'x86':
-      return
+    """Checks that architecture option is valid."""
+    if platform_string() == 'windows':
+        _windows_arch_option(arch)
+    elif platform_string() == 'linux':
+        # On linux, we need to be running under docker with the right architecture.
+        runtime_arch = platform.architecture()[0]
+        if arch == 'default':
+            return
+        elif runtime_arch == '64bit' and arch == 'x64':
+            return
+        elif runtime_arch == '32bit' and arch == 'x86':
+            return
+        else:
+            print('Architecture %s does not match current runtime architecture.'
+                  % arch)
+            sys.exit(1)
     else:
-      print('Architecture %s does not match current runtime architecture.' % arch)
-      sys.exit(1)
-  else:
-    if args.arch != 'default':
-      print('Architecture %s not supported on current platform.' % args.arch)
-      sys.exit(1)
+        if args.arch != 'default':
+            print('Architecture %s not supported on current platform.' %
+                  args.arch)
+            sys.exit(1)
 
 
 def _docker_arch_suffix(arch):
-  """Returns suffix to dockerfile dir to use."""
-  if arch == 'default' or arch == 'x64':
-    return 'x64'
-  elif arch == 'x86':
-    return 'x86'
-  else:
-    print('Architecture %s not supported with current settings.' % arch)
-    sys.exit(1)
+    """Returns suffix to dockerfile dir to use."""
+    if arch == 'default' or arch == 'x64':
+        return 'x64'
+    elif arch == 'x86':
+        return 'x86'
+    else:
+        print('Architecture %s not supported with current settings.' % arch)
+        sys.exit(1)
 
 
 def runs_per_test_type(arg_str):
@@ -1111,478 +1236,581 @@
 
 
 def percent_type(arg_str):
-  pct = float(arg_str)
-  if pct > 100 or pct < 0:
-    raise argparse.ArgumentTypeError(
-        "'%f' is not a valid percentage in the [0, 100] range" % pct)
-  return pct
+    pct = float(arg_str)
+    if pct > 100 or pct < 0:
+        raise argparse.ArgumentTypeError(
+            "'%f' is not a valid percentage in the [0, 100] range" % pct)
+    return pct
+
 
 # This is math.isclose in python >= 3.5
 def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
-      return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
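# A brief illustration of the tolerance semantics implemented by isclose()
# above; the numbers are arbitrary examples.
assert isclose(1.0, 1.0 + 1e-10)         # within the default relative tolerance
assert not isclose(0.0, 1e-3)            # abs_tol defaults to 0.0
assert isclose(0.0, 1e-3, abs_tol=1e-2)  # passes once an absolute tolerance is given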
 
 
 # parse command line
 argp = argparse.ArgumentParser(description='Run grpc tests.')
-argp.add_argument('-c', '--config',
-                  choices=sorted(_CONFIGS.keys()),
-                  default='opt')
-argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
-        help='A positive integer or "inf". If "inf", all tests will run in an '
-             'infinite loop. Especially useful in combination with "-f"')
+argp.add_argument(
+    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
+argp.add_argument(
+    '-n',
+    '--runs_per_test',
+    default=1,
+    type=runs_per_test_type,
+    help='A positive integer or "inf". If "inf", all tests will run in an '
+    'infinite loop. Especially useful in combination with "-f"')
 argp.add_argument('-r', '--regex', default='.*', type=str)
 argp.add_argument('--regex_exclude', default='', type=str)
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
 argp.add_argument('-s', '--slowdown', default=1.0, type=float)
-argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
-                  help='Run a random sample with that percentage of tests')
-argp.add_argument('-f', '--forever',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--newline_on_success',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES.keys()),
-                  nargs='+',
-                  default=['all'])
-argp.add_argument('-S', '--stop_on_failure',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--use_docker',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run all the tests under docker. That provides ' +
-                  'additional isolation and prevents the need to install ' +
-                  'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--arch',
-                  choices=['default', 'x86', 'x64'],
-                  default='default',
-                  help='Selects architecture to target. For some platforms "default" is the only supported choice.')
-argp.add_argument('--compiler',
-                  choices=['default',
-                           'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
-                           'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
-                           'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', 'all_the_cpythons',
-                           'electron1.3', 'electron1.6',
-                           'coreclr',
-                           'cmake', 'cmake_vs2015', 'cmake_vs2017'],
-                  default='default',
-                  help='Selects compiler to use. Allowed values depend on the platform and language.')
-argp.add_argument('--iomgr_platform',
-                  choices=['native', 'uv'],
-                  default='native',
-                  help='Selects iomgr platform to build on')
-argp.add_argument('--build_only',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Perform all the build steps but don\'t run any tests.')
-argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
-                  help='Measure the cpu costs of tests')
-argp.add_argument('--update_submodules', default=[], nargs='*',
-                  help='Update some submodules before building. If any are updated, also run generate_projects. ' +
-                       'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
+argp.add_argument(
+    '-p',
+    '--sample_percent',
+    default=100.0,
+    type=percent_type,
+    help='Run a random sample with that percentage of tests')
+argp.add_argument(
+    '-f', '--forever', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--newline_on_success', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES.keys()),
+    nargs='+',
+    default=['all'])
+argp.add_argument(
+    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--use_docker',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run all the tests under docker. That provides ' +
+    'additional isolation and prevents the need to install ' +
+    'language specific prerequisites. Only available on Linux.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+    '--arch',
+    choices=['default', 'x86', 'x64'],
+    default='default',
+    help='Selects architecture to target. For some platforms "default" is the only supported choice.'
+)
+argp.add_argument(
+    '--compiler',
+    choices=[
+        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
+        'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
+        'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
+        'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
+        'cmake_vs2015', 'cmake_vs2017'
+    ],
+    default='default',
+    help='Selects compiler to use. Allowed values depend on the platform and language.'
+)
+argp.add_argument(
+    '--iomgr_platform',
+    choices=['native', 'uv'],
+    default='native',
+    help='Selects iomgr platform to build on')
+argp.add_argument(
+    '--build_only',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Perform all the build steps but don\'t run any tests.')
+argp.add_argument(
+    '--measure_cpu_costs',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Measure the cpu costs of tests')
+argp.add_argument(
+    '--update_submodules',
+    default=[],
+    nargs='*',
+    help='Update some submodules before building. If any are updated, also run generate_projects. '
+    +
+    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
+)
 argp.add_argument('-a', '--antagonists', default=0, type=int)
-argp.add_argument('-x', '--xml_report', default=None, type=str,
-        help='Generates a JUnit-compatible XML report')
-argp.add_argument('--report_suite_name', default='tests', type=str,
-        help='Test suite name to use in generated JUnit XML report')
-argp.add_argument('--quiet_success',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
-                       'Useful when running many iterations of each test (argument -n).')
-argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
-                  help='Don\'t try to iterate over many polling strategies when they exist')
-argp.add_argument('--force_use_pollers', default=None, type=str,
-                  help='Only use the specified comma-delimited list of polling engines. '
-                  'Example: --force_use_pollers epollsig,poll '
-                  ' (This flag has no effect if --force_default_poller flag is also used)')
-argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
-argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action='store_const',
-                  help='Disable rerunning historically flaky tests')
+argp.add_argument(
+    '-x',
+    '--xml_report',
+    default=None,
+    type=str,
+    help='Generates a JUnit-compatible XML report')
+argp.add_argument(
+    '--report_suite_name',
+    default='tests',
+    type=str,
+    help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+    '--quiet_success',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+    + 'Useful when running many iterations of each test (argument -n).')
+argp.add_argument(
+    '--force_default_poller',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument(
+    '--force_use_pollers',
+    default=None,
+    type=str,
+    help='Only use the specified comma-delimited list of polling engines. '
+    'Example: --force_use_pollers epollsig,poll '
+    ' (This flag has no effect if --force_default_poller flag is also used)')
+argp.add_argument(
+    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--disable_auto_set_flakes',
+    default=False,
+    const=True,
+    action='store_const',
+    help='Disable rerunning historically flaky tests')
 args = argp.parse_args()
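# A hypothetical invocation combining the flags defined above; the chosen
# values are illustrative only.
#
#   python tools/run_tests/run_tests.py -l python -c opt --compiler python3.6 \
#       -n 3 -x report.xml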
 
 flaky_tests = set()
 shortname_to_cpu = {}
 if not args.disable_auto_set_flakes:
-  try:
-    for test in get_bqtest_data():
-      if test.flaky: flaky_tests.add(test.name)
-      if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
-  except:
-    print("Unexpected error getting flaky tests: %s" % traceback.format_exc())
+    try:
+        for test in get_bqtest_data():
+            if test.flaky: flaky_tests.add(test.name)
+            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
+    except:
+        print("Unexpected error getting flaky tests: %s" %
+              traceback.format_exc())
 
 if args.force_default_poller:
-  _POLLING_STRATEGIES = {}
+    _POLLING_STRATEGIES = {}
 elif args.force_use_pollers:
-  _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
+    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
 
 jobset.measure_cpu_costs = args.measure_cpu_costs
 
 # update submodules if necessary
 need_to_regenerate_projects = False
 for spec in args.update_submodules:
-  spec = spec.split(':', 1)
-  if len(spec) == 1:
-    submodule = spec[0]
-    branch = 'master'
-  elif len(spec) == 2:
-    submodule = spec[0]
-    branch = spec[1]
-  cwd = 'third_party/%s' % submodule
-  def git(cmd, cwd=cwd):
-    print('in %s: git %s' % (cwd, cmd))
-    run_shell_command('git %s' % cmd, cwd=cwd)
-  git('fetch')
-  git('checkout %s' % branch)
-  git('pull origin %s' % branch)
-  if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
-    need_to_regenerate_projects = True
-if need_to_regenerate_projects:
-  if jobset.platform_string() == 'linux':
-    run_shell_command('tools/buildgen/generate_projects.sh')
-  else:
-    print('WARNING: may need to regenerate projects, but since we are not on')
-    print('         Linux this step is being skipped. Compilation MAY fail.')
+    spec = spec.split(':', 1)
+    if len(spec) == 1:
+        submodule = spec[0]
+        branch = 'master'
+    elif len(spec) == 2:
+        submodule = spec[0]
+        branch = spec[1]
+    cwd = 'third_party/%s' % submodule
 
+    def git(cmd, cwd=cwd):
+        print('in %s: git %s' % (cwd, cmd))
+        run_shell_command('git %s' % cmd, cwd=cwd)
+
+    git('fetch')
+    git('checkout %s' % branch)
+    git('pull origin %s' % branch)
+    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
+        need_to_regenerate_projects = True
+if need_to_regenerate_projects:
+    if jobset.platform_string() == 'linux':
+        run_shell_command('tools/buildgen/generate_projects.sh')
+    else:
+        print(
+            'WARNING: may need to regenerate projects, but since we are not on')
+        print(
+            '         Linux this step is being skipped. Compilation MAY fail.')
 
 # grab config
 run_config = _CONFIGS[args.config]
 build_config = run_config.build_config
 
 if args.travis:
-  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
+    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
 
 if 'all' in args.language:
-  lang_list = _LANGUAGES.keys()
+    lang_list = _LANGUAGES.keys()
 else:
-  lang_list = args.language
+    lang_list = args.language
 # We don't support code coverage on some languages
 if 'gcov' in args.config:
-  for bad in ['objc', 'sanity']:
-    if bad in lang_list:
-      lang_list.remove(bad)
+    for bad in ['objc', 'sanity']:
+        if bad in lang_list:
+            lang_list.remove(bad)
 
 languages = set(_LANGUAGES[l] for l in lang_list)
 for l in languages:
-  l.configure(run_config, args)
+    l.configure(run_config, args)
 
-language_make_options=[]
+language_make_options = []
 if any(language.make_options() for language in languages):
-  if not 'gcov' in args.config and len(languages) != 1:
-    print('languages with custom make options cannot be built simultaneously with other languages')
-    sys.exit(1)
-  else:
-    # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
-    # together, and is only used under gcov. All other configs should build languages individually.
-    language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
+    if not 'gcov' in args.config and len(languages) != 1:
+        print(
+            'languages with custom make options cannot be built simultaneously with other languages'
+        )
+        sys.exit(1)
+    else:
+        # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
+        # together, and is only used under gcov. All other configs should build languages individually.
+        language_make_options = list(
+            set([
+                make_option
+                for lang in languages for make_option in lang.make_options()
+            ]))
 
 if args.use_docker:
-  if not args.travis:
-    print('Seen --use_docker flag, will run tests under docker.')
-    print('')
-    print('IMPORTANT: The changes you are testing need to be locally committed')
-    print('because only the committed changes in the current branch will be')
-    print('copied to the docker environment.')
-    time.sleep(5)
+    if not args.travis:
+        print('Seen --use_docker flag, will run tests under docker.')
+        print('')
+        print(
+            'IMPORTANT: The changes you are testing need to be locally committed'
+        )
+        print(
+            'because only the committed changes in the current branch will be')
+        print('copied to the docker environment.')
+        time.sleep(5)
 
-  dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
-  if len(dockerfile_dirs) > 1:
-    if 'gcov' in args.config:
-      dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
-      print ('Using multilang_jessie_x64 docker image for code coverage for '
-             'all languages.')
+    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
+    if len(dockerfile_dirs) > 1:
+        if 'gcov' in args.config:
+            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
+            print(
+                'Using multilang_jessie_x64 docker image for code coverage for '
+                'all languages.')
+        else:
+            print(
+                'Languages to be tested require running under different docker '
+                'images.')
+            sys.exit(1)
     else:
-      print ('Languages to be tested require running under different docker '
-             'images.')
-      sys.exit(1)
-  else:
-    dockerfile_dir = next(iter(dockerfile_dirs))
+        dockerfile_dir = next(iter(dockerfile_dirs))
 
-  child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
-  run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
+    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
+    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
+        child_argv[1:])
 
-  env = os.environ.copy()
-  env['RUN_TESTS_COMMAND'] = run_tests_cmd
-  env['DOCKERFILE_DIR'] = dockerfile_dir
-  env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
-  if args.xml_report:
-    env['XML_REPORT'] = args.xml_report
-  if not args.travis:
-    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
+    env = os.environ.copy()
+    env['RUN_TESTS_COMMAND'] = run_tests_cmd
+    env['DOCKERFILE_DIR'] = dockerfile_dir
+    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
+    if args.xml_report:
+        env['XML_REPORT'] = args.xml_report
+    if not args.travis:
+        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
 
-  subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
-                        shell=True,
-                        env=env)
-  sys.exit(0)
+    subprocess.check_call(
+        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+        shell=True,
+        env=env)
+    sys.exit(0)
 
 _check_arch_option(args.arch)
 
+
 def make_jobspec(cfg, targets, makefile='Makefile'):
-  if platform_string() == 'windows':
-    return [jobset.JobSpec(['cmake', '--build', '.',
-                            '--target', '%s' % target,
-                            '--config', _MSBUILD_CONFIG[cfg]],
-                           cwd=os.path.dirname(makefile),
-                           timeout_seconds=None) for target in targets]
-  else:
-    if targets and makefile.startswith('cmake/build/'):
-      # With cmake, we've passed all the build configuration in the pre-build step already
-      return [jobset.JobSpec([os.getenv('MAKE', 'make'),
-                              '-j', '%d' % args.jobs] +
-                             targets,
-                             cwd='cmake/build',
-                             timeout_seconds=None)]
-    if targets:
-      return [jobset.JobSpec([os.getenv('MAKE', 'make'),
-                              '-f', makefile,
-                              '-j', '%d' % args.jobs,
-                              'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
-                              'CONFIG=%s' % cfg,
-                              'Q='] +
-                              language_make_options +
-                             ([] if not args.travis else ['JENKINS_BUILD=1']) +
-                             targets,
-                             timeout_seconds=None)]
+    if platform_string() == 'windows':
+        return [
+            jobset.JobSpec(
+                [
+                    'cmake', '--build', '.', '--target', '%s' % target,
+                    '--config', _MSBUILD_CONFIG[cfg]
+                ],
+                cwd=os.path.dirname(makefile),
+                timeout_seconds=None) for target in targets
+        ]
     else:
-      return []
+        if targets and makefile.startswith('cmake/build/'):
+            # With cmake, we've passed all the build configuration in the pre-build step already
+            return [
+                jobset.JobSpec(
+                    [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
+                    targets,
+                    cwd='cmake/build',
+                    timeout_seconds=None)
+            ]
+        if targets:
+            return [
+                jobset.JobSpec(
+                    [
+                        os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
+                        args.jobs,
+                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
+                        args.slowdown, 'CONFIG=%s' % cfg, 'Q='
+                    ] + language_make_options +
+                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
+                    timeout_seconds=None)
+            ]
+        else:
+            return []
+
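# A sketch of what make_jobspec() above returns on a non-Windows host for a
# plain Makefile build, assuming the default jobs/slowdown values, no Travis,
# and empty language_make_options; 'grpc_csharp_ext' is just an example target.
#
#   make_jobspec('opt', ['grpc_csharp_ext'])
#     -> [JobSpec(['make', '-f', 'Makefile', '-j', '<cpu_count>',
#                  'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=1.000000',
#                  'CONFIG=opt', 'Q=', 'grpc_csharp_ext'])]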
 
 make_targets = {}
 for l in languages:
-  makefile = l.makefile_name()
-  make_targets[makefile] = make_targets.get(makefile, set()).union(
-      set(l.make_targets()))
+    makefile = l.makefile_name()
+    make_targets[makefile] = make_targets.get(
+        makefile, set()).union(set(l.make_targets()))
+
 
 def build_step_environ(cfg):
-  environ = {'CONFIG': cfg}
-  msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
-  if msbuild_cfg:
-    environ['MSBUILD_CONFIG'] = msbuild_cfg
-  return environ
+    environ = {'CONFIG': cfg}
+    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
+    if msbuild_cfg:
+        environ['MSBUILD_CONFIG'] = msbuild_cfg
+    return environ
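# A quick sanity check of build_step_environ() against the _MSBUILD_CONFIG
# table defined earlier; 'asan' stands in for any config without an msbuild
# mapping.
assert build_step_environ('dbg') == {'CONFIG': 'dbg', 'MSBUILD_CONFIG': 'Debug'}
assert build_step_environ('asan') == {'CONFIG': 'asan'}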
 
-build_steps = list(set(
-                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
-                   for l in languages
-                   for cmdline in l.pre_build_steps()))
+
+build_steps = list(
+    set(
+        jobset.JobSpec(
+            cmdline, environ=build_step_environ(build_config), flake_retries=2)
+        for l in languages for cmdline in l.pre_build_steps()))
 if make_targets:
-  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
-  build_steps.extend(set(make_commands))
-build_steps.extend(set(
-                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
-                   for l in languages
-                   for cmdline in l.build_steps()))
+    make_commands = itertools.chain.from_iterable(
+        make_jobspec(build_config, list(targets), makefile)
+        for (makefile, targets) in make_targets.items())
+    build_steps.extend(set(make_commands))
+build_steps.extend(
+    set(
+        jobset.JobSpec(
+            cmdline,
+            environ=build_step_environ(build_config),
+            timeout_seconds=None)
+        for l in languages for cmdline in l.build_steps()))
 
-post_tests_steps = list(set(
-                        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
-                        for l in languages
-                        for cmdline in l.post_tests_steps()))
+post_tests_steps = list(
+    set(
+        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
+        for l in languages for cmdline in l.post_tests_steps()))
 runs_per_test = args.runs_per_test
 forever = args.forever
 
 
 def _shut_down_legacy_server(legacy_server_port):
-  try:
-    version = int(urllib.request.urlopen(
-        'http://localhost:%d/version_number' % legacy_server_port,
-        timeout=10).read())
-  except:
-    pass
-  else:
-    urllib.request.urlopen(
-        'http://localhost:%d/quitquitquit' % legacy_server_port).read()
+    try:
+        version = int(
+            urllib.request.urlopen(
+                'http://localhost:%d/version_number' % legacy_server_port,
+                timeout=10).read())
+    except:
+        pass
+    else:
+        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
+                               legacy_server_port).read()
 
 
 def _calculate_num_runs_failures(list_of_results):
-  """Caculate number of runs and failures for a particular test.
+    """Caculate number of runs and failures for a particular test.
 
   Args:
     list_of_results: (List) of JobResult object.
   Returns:
     A tuple of total number of runs and failures.
   """
-  num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
-  num_failures = 0
-  for jobresult in list_of_results:
-    if jobresult.retries > 0:
-      num_runs += jobresult.retries
-    if jobresult.num_failures > 0:
-      num_failures += jobresult.num_failures
-  return num_runs, num_failures
+    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
+    num_failures = 0
+    for jobresult in list_of_results:
+        if jobresult.retries > 0:
+            num_runs += jobresult.retries
+        if jobresult.num_failures > 0:
+            num_failures += jobresult.num_failures
+    return num_runs, num_failures
 
 
 # _build_and_run results
 class BuildAndRunError(object):
 
-  BUILD = object()
-  TEST = object()
-  POST_TEST = object()
+    BUILD = object()
+    TEST = object()
+    POST_TEST = object()
 
 
 def _has_epollexclusive():
-  binary = 'bins/%s/check_epollexclusive' % args.config
-  if not os.path.exists(binary):
-    return False
-  try:
-    subprocess.check_call(binary)
-    return True
-  except subprocess.CalledProcessError, e:
-    return False
-  except OSError, e:
-    # For languages other than C and Windows the binary won't exist
-    return False
+    binary = 'bins/%s/check_epollexclusive' % args.config
+    if not os.path.exists(binary):
+        return False
+    try:
+        subprocess.check_call(binary)
+        return True
+    except subprocess.CalledProcessError, e:
+        return False
+    except OSError, e:
+        # For languages other than C and Windows the binary won't exist
+        return False
 
 
 # returns a list of things that failed (or an empty list on success)
-def _build_and_run(
-    check_cancelled, newline_on_success, xml_report=None, build_only=False):
-  """Do one pass of building & running tests."""
-  # build latest sequentially
-  num_failures, resultset = jobset.run(
-      build_steps, maxjobs=1, stop_on_failure=True,
-      newline_on_success=newline_on_success, travis=args.travis)
-  if num_failures:
-    return [BuildAndRunError.BUILD]
+def _build_and_run(check_cancelled,
+                   newline_on_success,
+                   xml_report=None,
+                   build_only=False):
+    """Do one pass of building & running tests."""
+    # build latest sequentially
+    num_failures, resultset = jobset.run(
+        build_steps,
+        maxjobs=1,
+        stop_on_failure=True,
+        newline_on_success=newline_on_success,
+        travis=args.travis)
+    if num_failures:
+        return [BuildAndRunError.BUILD]
 
-  if build_only:
-    if xml_report:
-      report_utils.render_junit_xml_report(resultset, xml_report,
-                                           suite_name=args.report_suite_name)
-    return []
+    if build_only:
+        if xml_report:
+            report_utils.render_junit_xml_report(
+                resultset, xml_report, suite_name=args.report_suite_name)
+        return []
 
-  if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
-    print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
-    _POLLING_STRATEGIES[platform_string()].remove('epollex')
+    if not args.travis and not _has_epollexclusive() and platform_string(
+    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
+            platform_string()]:
+        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
+        _POLLING_STRATEGIES[platform_string()].remove('epollex')
 
-  # start antagonists
-  antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
-                 for _ in range(0, args.antagonists)]
-  start_port_server.start_port_server()
-  resultset = None
-  num_test_failures = 0
-  try:
-    infinite_runs = runs_per_test == 0
-    one_run = set(
-      spec
-      for language in languages
-      for spec in language.test_specs()
-      if (re.search(args.regex, spec.shortname) and
-          (args.regex_exclude == '' or
-           not re.search(args.regex_exclude, spec.shortname))))
-    # When running on travis, we want out test runs to be as similar as possible
-    # for reproducibility purposes.
-    if args.travis and args.max_time <= 0:
-      massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
-    else:
-      # whereas otherwise, we want to shuffle things up to give all tests a
-      # chance to run.
-      massaged_one_run = list(one_run)  # random.sample needs an indexable seq.
-      num_jobs = len(massaged_one_run)
-      # for a random sample, get as many as indicated by the 'sample_percent'
-      # argument. By default this arg is 100, resulting in a shuffle of all
-      # jobs.
-      sample_size = int(num_jobs * args.sample_percent/100.0)
-      massaged_one_run = random.sample(massaged_one_run, sample_size)
-      if not isclose(args.sample_percent, 100.0):
-        assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
-        print("Running %d tests out of %d (~%d%%)" %
-              (sample_size, num_jobs, args.sample_percent))
-    if infinite_runs:
-      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
-    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
-                     else itertools.repeat(massaged_one_run, runs_per_test))
-    all_runs = itertools.chain.from_iterable(runs_sequence)
+    # start antagonists
+    antagonists = [
+        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
+        for _ in range(0, args.antagonists)
+    ]
+    start_port_server.start_port_server()
+    resultset = None
+    num_test_failures = 0
+    try:
+        infinite_runs = runs_per_test == 0
+        one_run = set(spec
+                      for language in languages
+                      for spec in language.test_specs()
+                      if (re.search(args.regex, spec.shortname) and (
+                          args.regex_exclude == '' or not re.search(
+                              args.regex_exclude, spec.shortname))))
+        # When running on travis, we want our test runs to be as similar as possible
+        # for reproducibility purposes.
+        if args.travis and args.max_time <= 0:
+            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
+        else:
+            # whereas otherwise, we want to shuffle things up to give all tests a
+            # chance to run.
+            massaged_one_run = list(
+                one_run)  # random.sample needs an indexable seq.
+            num_jobs = len(massaged_one_run)
+            # for a random sample, get as many as indicated by the 'sample_percent'
+            # argument. By default this arg is 100, resulting in a shuffle of all
+            # jobs.
+            sample_size = int(num_jobs * args.sample_percent / 100.0)
+            massaged_one_run = random.sample(massaged_one_run, sample_size)
+            if not isclose(args.sample_percent, 100.0):
+                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
+                print("Running %d tests out of %d (~%d%%)" %
+                      (sample_size, num_jobs, args.sample_percent))
+        if infinite_runs:
+            assert len(massaged_one_run
+                      ) > 0, 'Must have at least one test for a -n inf run'
+        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
+                         else itertools.repeat(massaged_one_run, runs_per_test))
+        all_runs = itertools.chain.from_iterable(runs_sequence)
 
-    if args.quiet_success:
-      jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
-    num_test_failures, resultset = jobset.run(
-        all_runs, check_cancelled, newline_on_success=newline_on_success,
-        travis=args.travis, maxjobs=args.jobs, maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
-        stop_on_failure=args.stop_on_failure,
-        quiet_success=args.quiet_success, max_time=args.max_time)
-    if resultset:
-      for k, v in sorted(resultset.items()):
-        num_runs, num_failures = _calculate_num_runs_failures(v)
-        if num_failures > 0:
-          if num_failures == num_runs:  # what about infinite_runs???
-            jobset.message('FAILED', k, do_newline=True)
-          else:
+        if args.quiet_success:
             jobset.message(
-                'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+                'START',
+                'Running tests quietly, only failing tests will be reported',
                 do_newline=True)
-  finally:
-    for antagonist in antagonists:
-      antagonist.kill()
-    if args.bq_result_table and resultset:
-      upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
-    if xml_report and resultset:
-      report_utils.render_junit_xml_report(resultset, xml_report,
-                                           suite_name=args.report_suite_name)
+        num_test_failures, resultset = jobset.run(
+            all_runs,
+            check_cancelled,
+            newline_on_success=newline_on_success,
+            travis=args.travis,
+            maxjobs=args.jobs,
+            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
+            stop_on_failure=args.stop_on_failure,
+            quiet_success=args.quiet_success,
+            max_time=args.max_time)
+        if resultset:
+            for k, v in sorted(resultset.items()):
+                num_runs, num_failures = _calculate_num_runs_failures(v)
+                if num_failures > 0:
+                    if num_failures == num_runs:  # what about infinite_runs???
+                        jobset.message('FAILED', k, do_newline=True)
+                    else:
+                        jobset.message(
+                            'FLAKE',
+                            '%s [%d/%d runs flaked]' %
+                            (k, num_failures, num_runs),
+                            do_newline=True)
+    finally:
+        for antagonist in antagonists:
+            antagonist.kill()
+        if args.bq_result_table and resultset:
+            upload_results_to_bq(resultset, args.bq_result_table, args,
+                                 platform_string())
+        if xml_report and resultset:
+            report_utils.render_junit_xml_report(
+                resultset, xml_report, suite_name=args.report_suite_name)
 
-  number_failures, _ = jobset.run(
-      post_tests_steps, maxjobs=1, stop_on_failure=False,
-      newline_on_success=newline_on_success, travis=args.travis)
+    number_failures, _ = jobset.run(
+        post_tests_steps,
+        maxjobs=1,
+        stop_on_failure=False,
+        newline_on_success=newline_on_success,
+        travis=args.travis)
 
-  out = []
-  if number_failures:
-    out.append(BuildAndRunError.POST_TEST)
-  if num_test_failures:
-    out.append(BuildAndRunError.TEST)
+    out = []
+    if number_failures:
+        out.append(BuildAndRunError.POST_TEST)
+    if num_test_failures:
+        out.append(BuildAndRunError.TEST)
 
-  return out
+    return out
 
 
 if forever:
-  success = True
-  while True:
-    dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
-    initial_time = dw.most_recent_change()
-    have_files_changed = lambda: dw.most_recent_change() != initial_time
-    previous_success = success
-    errors = _build_and_run(check_cancelled=have_files_changed,
-                            newline_on_success=False,
-                            build_only=args.build_only) == 0
-    if not previous_success and not errors:
-      jobset.message('SUCCESS',
-                     'All tests are now passing properly',
-                     do_newline=True)
-    jobset.message('IDLE', 'No change detected')
-    while not have_files_changed():
-      time.sleep(1)
+    success = True
+    while True:
+        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
+        initial_time = dw.most_recent_change()
+        have_files_changed = lambda: dw.most_recent_change() != initial_time
+        previous_success = success
+        errors = _build_and_run(
+            check_cancelled=have_files_changed,
+            newline_on_success=False,
+            build_only=args.build_only) == 0
+        if not previous_success and not errors:
+            jobset.message(
+                'SUCCESS',
+                'All tests are now passing properly',
+                do_newline=True)
+        jobset.message('IDLE', 'No change detected')
+        while not have_files_changed():
+            time.sleep(1)
 else:
-  errors = _build_and_run(check_cancelled=lambda: False,
-                          newline_on_success=args.newline_on_success,
-                          xml_report=args.xml_report,
-                          build_only=args.build_only)
-  if not errors:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
-  else:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  exit_code = 0
-  if BuildAndRunError.BUILD in errors:
-    exit_code |= 1
-  if BuildAndRunError.TEST in errors:
-    exit_code |= 2
-  if BuildAndRunError.POST_TEST in errors:
-    exit_code |= 4
-  sys.exit(exit_code)
+    errors = _build_and_run(
+        check_cancelled=lambda: False,
+        newline_on_success=args.newline_on_success,
+        xml_report=args.xml_report,
+        build_only=args.build_only)
+    if not errors:
+        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+    else:
+        jobset.message('FAILED', 'Some tests failed', do_newline=True)
+    exit_code = 0
+    if BuildAndRunError.BUILD in errors:
+        exit_code |= 1
+    if BuildAndRunError.TEST in errors:
+        exit_code |= 2
+    if BuildAndRunError.POST_TEST in errors:
+        exit_code |= 4
+    sys.exit(exit_code)
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7c58d8e..49be8f1 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run test matrix."""
 
 from __future__ import print_function
@@ -29,14 +28,14 @@
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
 
 # Set the timeout high to allow enough time for sanitizers and pre-building
 # clang docker.
-_CPP_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
 
 # C++ TSAN takes longer than other sanitizers
-_CPP_TSAN_RUNTESTS_TIMEOUT = 8*60*60
+_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
 
 # Number of jobs assigned to each run_tests.py instance
 _DEFAULT_INNER_JOBS = 2
@@ -46,448 +45,517 @@
 
 
 def _report_filename(name):
-  """Generates report file name"""
-  return 'report_%s_%s' % (name, _REPORT_SUFFIX)
+    """Generates report file name"""
+    return 'report_%s_%s' % (name, _REPORT_SUFFIX)
 
 
 def _report_filename_internal_ci(name):
-  """Generates report file name that leads to better presentation by internal CI"""
-  return '%s/%s' % (name, _REPORT_SUFFIX)
+    """Generates report file name that leads to better presentation by internal CI"""
+    return '%s/%s' % (name, _REPORT_SUFFIX)
 
 
-def _docker_jobspec(name, runtests_args=[], runtests_envs={},
+def _docker_jobspec(name,
+                    runtests_args=[],
+                    runtests_envs={},
                     inner_jobs=_DEFAULT_INNER_JOBS,
                     timeout_seconds=None):
-  """Run a single instance of run_tests.py in a docker container"""
-  if not timeout_seconds:
-    timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
-  test_job = jobset.JobSpec(
-          cmdline=['python', 'tools/run_tests/run_tests.py',
-                   '--use_docker',
-                   '-t',
-                   '-j', str(inner_jobs),
-                   '-x', _report_filename(name),
-                   '--report_suite_name', '%s' % name] + runtests_args,
-          environ=runtests_envs,
-          shortname='run_tests_%s' % name,
-          timeout_seconds=timeout_seconds)
-  return test_job
+    """Run a single instance of run_tests.py in a docker container"""
+    if not timeout_seconds:
+        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    test_job = jobset.JobSpec(
+        cmdline=[
+            'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
+            '-j', str(inner_jobs), '-x', _report_filename(name),
+            '--report_suite_name', '%s' % name
+        ] + runtests_args,
+        environ=runtests_envs,
+        shortname='run_tests_%s' % name,
+        timeout_seconds=timeout_seconds)
+    return test_job
 
 
-def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
-                       runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS,
+def _workspace_jobspec(name,
+                       runtests_args=[],
+                       workspace_name=None,
+                       runtests_envs={},
+                       inner_jobs=_DEFAULT_INNER_JOBS,
                        timeout_seconds=None):
-  """Run a single instance of run_tests.py in a separate workspace"""
-  if not workspace_name:
-    workspace_name = 'workspace_%s' % name
-  if not timeout_seconds:
-    timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
-  env = {'WORKSPACE_NAME': workspace_name}
-  env.update(runtests_envs)
-  test_job = jobset.JobSpec(
-          cmdline=['bash',
-                   'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
-                   '-t',
-                   '-j', str(inner_jobs),
-                   '-x', '../%s' % _report_filename(name),
-                   '--report_suite_name', '%s' % name] + runtests_args,
-          environ=env,
-          shortname='run_tests_%s' % name,
-          timeout_seconds=timeout_seconds)
-  return test_job
+    """Run a single instance of run_tests.py in a separate workspace"""
+    if not workspace_name:
+        workspace_name = 'workspace_%s' % name
+    if not timeout_seconds:
+        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    env = {'WORKSPACE_NAME': workspace_name}
+    env.update(runtests_envs)
+    test_job = jobset.JobSpec(
+        cmdline=[
+            'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+            '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
+            '--report_suite_name', '%s' % name
+        ] + runtests_args,
+        environ=env,
+        shortname='run_tests_%s' % name,
+        timeout_seconds=timeout_seconds)
+    return test_job
 
 
-def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
-                  arch=None, compiler=None,
-                  labels=[], extra_args=[], extra_envs={},
-                  inner_jobs=_DEFAULT_INNER_JOBS,
-                  timeout_seconds=None):
-  result = []
-  for language in languages:
-    for platform in platforms:
-      for config in configs:
-        name = '%s_%s_%s_%s' % (language, platform, config, iomgr_platform)
-        runtests_args = ['-l', language,
-                         '-c', config,
-                         '--iomgr_platform', iomgr_platform]
-        if arch or compiler:
-          name += '_%s_%s' % (arch, compiler)
-          runtests_args += ['--arch', arch,
-                            '--compiler', compiler]
-        if '--build_only' in extra_args:
-          name += '_buildonly'
-        for extra_env in extra_envs:
-          name += '_%s_%s' % (extra_env, extra_envs[extra_env])
+def _generate_jobs(languages,
+                   configs,
+                   platforms,
+                   iomgr_platform='native',
+                   arch=None,
+                   compiler=None,
+                   labels=[],
+                   extra_args=[],
+                   extra_envs={},
+                   inner_jobs=_DEFAULT_INNER_JOBS,
+                   timeout_seconds=None):
+    result = []
+    for language in languages:
+        for platform in platforms:
+            for config in configs:
+                name = '%s_%s_%s_%s' % (language, platform, config,
+                                        iomgr_platform)
+                runtests_args = [
+                    '-l', language, '-c', config, '--iomgr_platform',
+                    iomgr_platform
+                ]
+                if arch or compiler:
+                    name += '_%s_%s' % (arch, compiler)
+                    runtests_args += ['--arch', arch, '--compiler', compiler]
+                if '--build_only' in extra_args:
+                    name += '_buildonly'
+                for extra_env in extra_envs:
+                    name += '_%s_%s' % (extra_env, extra_envs[extra_env])
 
-        runtests_args += extra_args
-        if platform == 'linux':
-          job = _docker_jobspec(name=name, runtests_args=runtests_args,
-                                runtests_envs=extra_envs, inner_jobs=inner_jobs,
-                                timeout_seconds=timeout_seconds)
-        else:
-          job = _workspace_jobspec(name=name, runtests_args=runtests_args,
-                                   runtests_envs=extra_envs, inner_jobs=inner_jobs,
-                                   timeout_seconds=timeout_seconds)
+                runtests_args += extra_args
+                if platform == 'linux':
+                    job = _docker_jobspec(
+                        name=name,
+                        runtests_args=runtests_args,
+                        runtests_envs=extra_envs,
+                        inner_jobs=inner_jobs,
+                        timeout_seconds=timeout_seconds)
+                else:
+                    job = _workspace_jobspec(
+                        name=name,
+                        runtests_args=runtests_args,
+                        runtests_envs=extra_envs,
+                        inner_jobs=inner_jobs,
+                        timeout_seconds=timeout_seconds)
 
-        job.labels = [platform, config, language, iomgr_platform] + labels
-        result.append(job)
-  return result
+                job.labels = [platform, config, language, iomgr_platform
+                             ] + labels
+                result.append(job)
+    return result
 
 
 def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
-  test_jobs = []
-  # supported on linux only
-  test_jobs += _generate_jobs(languages=['sanity', 'php7'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux'],
-                             labels=['basictests', 'multilang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs)
+    test_jobs = []
+    # supported on linux only
+    test_jobs += _generate_jobs(
+        languages=['sanity', 'php7'],
+        configs=['dbg', 'opt'],
+        platforms=['linux'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on all platforms.
-  test_jobs += _generate_jobs(languages=['c'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux', 'macos', 'windows'],
-                             labels=['basictests', 'corelang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs,
-                             timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # supported on all platforms.
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos', 'windows'],
+        labels=['basictests', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  test_jobs += _generate_jobs(languages=['csharp', 'python'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux', 'macos', 'windows'],
-                             labels=['basictests', 'multilang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['csharp', 'python'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos', 'windows'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on linux and mac.
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['dbg', 'opt'],
-                              platforms=['linux', 'macos'],
-                              labels=['basictests', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # supported on linux and mac.
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos'],
+        labels=['basictests', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
-                              configs=['dbg', 'opt'],
-                              platforms=['linux', 'macos'],
-                              labels=['basictests', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['grpc-node', 'ruby', 'php'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on mac only.
-  test_jobs += _generate_jobs(languages=['objc'],
-                              configs=['dbg', 'opt'],
-                              platforms=['macos'],
-                              labels=['basictests', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    # supported on mac only.
+    test_jobs += _generate_jobs(
+        languages=['objc'],
+        configs=['dbg', 'opt'],
+        platforms=['macos'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # sanitizers
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['msan', 'asan', 'tsan', 'ubsan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['asan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['tsan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
+    # sanitizers
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['msan', 'asan', 'tsan', 'ubsan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['asan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['tsan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
 
-  return test_jobs
+    return test_jobs
 
 
-def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
-  test_jobs = []
-  # portability C x86
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='x86',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+def _create_portability_test_jobs(extra_args=[],
+                                  inner_jobs=_DEFAULT_INNER_JOBS):
+    test_jobs = []
+    # portability C x86
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='x86',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # portability C and C++ on x64
-  for compiler in ['gcc4.8', 'gcc5.3', 'gcc_musl',
-                   'clang3.5', 'clang3.6', 'clang3.7']:
-    test_jobs += _generate_jobs(languages=['c', 'c++'],
-                                configs=['dbg'],
-                                platforms=['linux'],
-                                arch='x64',
-                                compiler=compiler,
-                                labels=['portability', 'corelang'],
-                                extra_args=extra_args,
-                                inner_jobs=inner_jobs,
-                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # portability C and C++ on x64
+    for compiler in [
+            'gcc4.8', 'gcc5.3', 'gcc_musl', 'clang3.5', 'clang3.6', 'clang3.7'
+    ]:
+        test_jobs += _generate_jobs(
+            languages=['c', 'c++'],
+            configs=['dbg'],
+            platforms=['linux'],
+            arch='x64',
+            compiler=compiler,
+            labels=['portability', 'corelang'],
+            extra_args=extra_args,
+            inner_jobs=inner_jobs,
+            timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  # portability C on Windows 64-bit (x86 is the default)
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='x64',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    # portability C on Windows 64-bit (x86 is the default)
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='x64',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # portability C++ on Windows
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='default',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # portability C++ on Windows
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='default',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  # portability C and C++ on Windows using VS2017 (build only)
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='x64',
-                              compiler='cmake_vs2017',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # portability C and C++ on Windows using VS2017 (build only)
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='x64',
+        compiler='cmake_vs2017',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  # C and C++ with the c-ares DNS resolver on Linux
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'], platforms=['linux'],
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # C and C++ with the c-ares DNS resolver on Linux
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['linux'],
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  # TODO(zyc): Turn on this test after adding c-ares support on windows.
-  # C with the c-ares DNS resolver on Windows
-  # test_jobs += _generate_jobs(languages=['c'],
-  #                             configs=['dbg'], platforms=['windows'],
-  #                             labels=['portability', 'corelang'],
-  #                             extra_args=extra_args,
-  #                             extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+    # TODO(zyc): Turn on this test after adding c-ares support on windows.
+    # C with the c-ares DNS resolver on Windows
+    # test_jobs += _generate_jobs(languages=['c'],
+    #                             configs=['dbg'], platforms=['windows'],
+    #                             labels=['portability', 'corelang'],
+    #                             extra_args=extra_args,
+    #                             extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
 
-  # C and C++ build with cmake on Linux
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  # to make sure it's buildable at least.
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='cmake',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # C and C++ build with cmake on Linux
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    # to make sure it's buildable at least.
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='cmake',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['python'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='python_alpine',
-                              labels=['portability', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['python'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='python_alpine',
+        labels=['portability', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['csharp'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='coreclr',
-                              labels=['portability', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['csharp'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='coreclr',
+        labels=['portability', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              iomgr_platform='uv',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['linux'],
+        iomgr_platform='uv',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  return test_jobs
+    return test_jobs
 
 
 def _allowed_labels():
-  """Returns a list of existing job labels."""
-  all_labels = set()
-  for job in _create_test_jobs() + _create_portability_test_jobs():
-    for label in job.labels:
-      all_labels.add(label)
-  return sorted(all_labels)
+    """Returns a list of existing job labels."""
+    all_labels = set()
+    for job in _create_test_jobs() + _create_portability_test_jobs():
+        for label in job.labels:
+            all_labels.add(label)
+    return sorted(all_labels)
 
 
 def _runs_per_test_type(arg_str):
-  """Auxiliary function to parse the "runs_per_test" flag."""
-  try:
-    n = int(arg_str)
-    if n <= 0: raise ValueError
-    return n
-  except:
-    msg = '\'{}\' is not a positive integer'.format(arg_str)
-    raise argparse.ArgumentTypeError(msg)
+    """Auxiliary function to parse the "runs_per_test" flag."""
+    try:
+        n = int(arg_str)
+        if n <= 0: raise ValueError
+        return n
+    except:
+        msg = '\'{}\' is not a positive integer'.format(arg_str)
+        raise argparse.ArgumentTypeError(msg)
 
 
 if __name__ == "__main__":
-  argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
-  argp.add_argument('-j', '--jobs',
-                    default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
-                    type=int,
-                    help='Number of concurrent run_tests.py instances.')
-  argp.add_argument('-f', '--filter',
-                    choices=_allowed_labels(),
-                    nargs='+',
-                    default=[],
-                    help='Filter targets to run by label with AND semantics.')
-  argp.add_argument('--exclude',
-                    choices=_allowed_labels(),
-                    nargs='+',
-                    default=[],
-                    help='Exclude targets with any of given labels.')
-  argp.add_argument('--build_only',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Pass --build_only flag to run_tests.py instances.')
-  argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
-                    help='Pass --force_default_poller to run_tests.py instances.')
-  argp.add_argument('--dry_run',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Only print what would be run.')
-  argp.add_argument('--filter_pr_tests',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Filters out tests irrelevant to pull request changes.')
-  argp.add_argument('--base_branch',
-                    default='origin/master',
-                    type=str,
-                    help='Branch that pull request is requesting to merge into')
-  argp.add_argument('--inner_jobs',
-                    default=_DEFAULT_INNER_JOBS,
-                    type=int,
-                    help='Number of jobs in each run_tests.py instance')
-  argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
-                    help='How many times to run each tests. >1 runs implies ' +
-                    'omitting passing test from the output & reports.')
-  argp.add_argument('--max_time', default=-1, type=int,
-                    help='Maximum amount of time to run tests for' +
-                         '(other tests will be skipped)')
-  argp.add_argument('--internal_ci',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Put reports into subdirectories to improve presentation of '
-                    'results by Internal CI.')
-  argp.add_argument('--bq_result_table',
-                    default='',
-                    type=str,
-                    nargs='?',
-                    help='Upload test results to a specified BQ table.')
-  args = argp.parse_args()
+    argp = argparse.ArgumentParser(
+        description='Run a matrix of run_tests.py tests.')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
+        type=int,
+        help='Number of concurrent run_tests.py instances.')
+    argp.add_argument(
+        '-f',
+        '--filter',
+        choices=_allowed_labels(),
+        nargs='+',
+        default=[],
+        help='Filter targets to run by label with AND semantics.')
+    argp.add_argument(
+        '--exclude',
+        choices=_allowed_labels(),
+        nargs='+',
+        default=[],
+        help='Exclude targets with any of given labels.')
+    argp.add_argument(
+        '--build_only',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Pass --build_only flag to run_tests.py instances.')
+    argp.add_argument(
+        '--force_default_poller',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Pass --force_default_poller to run_tests.py instances.')
+    argp.add_argument(
+        '--dry_run',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Only print what would be run.')
+    argp.add_argument(
+        '--filter_pr_tests',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Filters out tests irrelevant to pull request changes.')
+    argp.add_argument(
+        '--base_branch',
+        default='origin/master',
+        type=str,
+        help='Branch that pull request is requesting to merge into')
+    argp.add_argument(
+        '--inner_jobs',
+        default=_DEFAULT_INNER_JOBS,
+        type=int,
+        help='Number of jobs in each run_tests.py instance')
+    argp.add_argument(
+        '-n',
+        '--runs_per_test',
+        default=1,
+        type=_runs_per_test_type,
+        help='How many times to run each test. >1 runs implies ' +
+        'omitting passing tests from the output & reports.')
+    argp.add_argument(
+        '--max_time',
+        default=-1,
+        type=int,
+        help='Maximum amount of time to run tests for ' +
+        '(other tests will be skipped)')
+    argp.add_argument(
+        '--internal_ci',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Put reports into subdirectories to improve presentation of '
+        'results by Internal CI.')
+    argp.add_argument(
+        '--bq_result_table',
+        default='',
+        type=str,
+        nargs='?',
+        help='Upload test results to a specified BQ table.')
+    args = argp.parse_args()
 
-  if args.internal_ci:
-    _report_filename = _report_filename_internal_ci  # override the function
+    if args.internal_ci:
+        _report_filename = _report_filename_internal_ci  # override the function
 
-  extra_args = []
-  if args.build_only:
-    extra_args.append('--build_only')
-  if args.force_default_poller:
-    extra_args.append('--force_default_poller')
-  if args.runs_per_test > 1:
-    extra_args.append('-n')
-    extra_args.append('%s' % args.runs_per_test)
-    extra_args.append('--quiet_success')
-  if args.max_time > 0:
-    extra_args.extend(('--max_time', '%d' % args.max_time))
-  if args.bq_result_table:
-    extra_args.append('--bq_result_table')
-    extra_args.append('%s' % args.bq_result_table)
-    extra_args.append('--measure_cpu_costs')
-    extra_args.append('--disable_auto_set_flakes')
+    extra_args = []
+    if args.build_only:
+        extra_args.append('--build_only')
+    if args.force_default_poller:
+        extra_args.append('--force_default_poller')
+    if args.runs_per_test > 1:
+        extra_args.append('-n')
+        extra_args.append('%s' % args.runs_per_test)
+        extra_args.append('--quiet_success')
+    if args.max_time > 0:
+        extra_args.extend(('--max_time', '%d' % args.max_time))
+    if args.bq_result_table:
+        extra_args.append('--bq_result_table')
+        extra_args.append('%s' % args.bq_result_table)
+        extra_args.append('--measure_cpu_costs')
+        extra_args.append('--disable_auto_set_flakes')
 
-  all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
-             _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
+    all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
+               _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
 
-  jobs = []
-  for job in all_jobs:
-    if not args.filter or all(filter in job.labels for filter in args.filter):
-      if not any(exclude_label in job.labels for exclude_label in args.exclude):
-        jobs.append(job)
+    jobs = []
+    for job in all_jobs:
+        if not args.filter or all(filter in job.labels
+                                  for filter in args.filter):
+            if not any(exclude_label in job.labels
+                       for exclude_label in args.exclude):
+                jobs.append(job)
 
-  if not jobs:
-    jobset.message('FAILED', 'No test suites match given criteria.',
-                   do_newline=True)
-    sys.exit(1)
+    if not jobs:
+        jobset.message(
+            'FAILED', 'No test suites match given criteria.', do_newline=True)
+        sys.exit(1)
 
-  print('IMPORTANT: The changes you are testing need to be locally committed')
-  print('because only the committed changes in the current branch will be')
-  print('copied to the docker environment or into subworkspaces.')
+    print('IMPORTANT: The changes you are testing need to be locally committed')
+    print('because only the committed changes in the current branch will be')
+    print('copied to the docker environment or into subworkspaces.')
 
-  skipped_jobs = []
+    skipped_jobs = []
 
-  if args.filter_pr_tests:
-    print('Looking for irrelevant tests to skip...')
-    relevant_jobs = filter_tests(jobs, args.base_branch)
-    if len(relevant_jobs) == len(jobs):
-      print('No tests will be skipped.')
-    else:
-      print('These tests will be skipped:')
-      skipped_jobs = list(set(jobs) - set(relevant_jobs))
-      # Sort by shortnames to make printing of skipped tests consistent
-      skipped_jobs.sort(key=lambda job: job.shortname)
-      for job in list(skipped_jobs):
-        print('  %s' % job.shortname)
-    jobs = relevant_jobs
+    if args.filter_pr_tests:
+        print('Looking for irrelevant tests to skip...')
+        relevant_jobs = filter_tests(jobs, args.base_branch)
+        if len(relevant_jobs) == len(jobs):
+            print('No tests will be skipped.')
+        else:
+            print('These tests will be skipped:')
+            skipped_jobs = list(set(jobs) - set(relevant_jobs))
+            # Sort by shortnames to make printing of skipped tests consistent
+            skipped_jobs.sort(key=lambda job: job.shortname)
+            for job in list(skipped_jobs):
+                print('  %s' % job.shortname)
+        jobs = relevant_jobs
 
-  print('Will run these tests:')
-  for job in jobs:
+    print('Will run these tests:')
+    for job in jobs:
+        if args.dry_run:
+            print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+        else:
+            print('  %s' % job.shortname)
+    print
+
     if args.dry_run:
-      print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+        print('--dry_run was used, exiting')
+        sys.exit(1)
+
+    jobset.message('START', 'Running test matrix.', do_newline=True)
+    num_failures, resultset = jobset.run(
+        jobs, newline_on_success=True, travis=True, maxjobs=args.jobs)
+    # Merge skipped tests into results to show skipped tests on report.xml
+    if skipped_jobs:
+        ignored_num_skipped_failures, skipped_results = jobset.run(
+            skipped_jobs, skip_jobs=True)
+        resultset.update(skipped_results)
+    report_utils.render_junit_xml_report(
+        resultset,
+        _report_filename('aggregate_tests'),
+        suite_name='aggregate_tests')
+
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS',
+            'All run_tests.py instances finished successfully.',
+            do_newline=True)
     else:
-      print('  %s' % job.shortname)
-  print
-
-  if args.dry_run:
-    print('--dry_run was used, exiting')
-    sys.exit(1)
-
-  jobset.message('START', 'Running test matrix.', do_newline=True)
-  num_failures, resultset = jobset.run(jobs,
-                                       newline_on_success=True,
-                                       travis=True,
-                                       maxjobs=args.jobs)
-  # Merge skipped tests into results to show skipped tests on report.xml
-  if skipped_jobs:
-    ignored_num_skipped_failures, skipped_results = jobset.run(
-        skipped_jobs, skip_jobs=True)
-    resultset.update(skipped_results)
-  report_utils.render_junit_xml_report(resultset, _report_filename('aggregate_tests'),
-                                       suite_name='aggregate_tests')
-
-  if num_failures == 0:
-    jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Some run_tests.py instance have failed.',
-                   do_newline=True)
-    sys.exit(1)
+        jobset.message(
+            'FAILED',
+            'Some run_tests.py instances have failed.',
+            do_newline=True)
+        sys.exit(1)
diff --git a/tools/run_tests/sanity/check_bazel_workspace.py b/tools/run_tests/sanity/check_bazel_workspace.py
index 776c78b..62a6229 100755
--- a/tools/run_tests/sanity/check_bazel_workspace.py
+++ b/tools/run_tests/sanity/check_bazel_workspace.py
@@ -27,23 +27,103 @@
 git_hash_pattern = re.compile('[0-9a-f]{40}')
 
 # Parse git hashes from submodules
-git_submodules = subprocess.check_output('git submodule', shell=True).strip().split('\n')
-git_submodule_hashes = {re.search(git_hash_pattern, s).group() for s in git_submodules}
+git_submodules = subprocess.check_output(
+    'git submodule', shell=True).strip().split('\n')
+git_submodule_hashes = {
+    re.search(git_hash_pattern, s).group()
+    for s in git_submodules
+}
 
-# Parse git hashes from Bazel WORKSPACE {new_}http_archive rules
-with open('WORKSPACE', 'r') as f:
-  workspace_rules = [expr.value for expr in ast.parse(f.read()).body]
+_BAZEL_TOOLCHAINS_DEP_NAME = 'com_github_bazelbuild_bazeltoolchains'
 
-http_archive_rules = [rule for rule in workspace_rules if rule.func.id.endswith('http_archive')]
-archive_urls = [kw.value.s for rule in http_archive_rules for kw in rule.keywords if kw.arg == 'url']
-workspace_git_hashes = {re.search(git_hash_pattern, url).group() for url in archive_urls}
+_GRPC_DEP_NAMES = [
+    'boringssl',
+    'com_github_madler_zlib',
+    'com_google_protobuf',
+    'com_github_google_googletest',
+    'com_github_gflags_gflags',
+    'com_github_google_benchmark',
+    'com_github_cares_cares',
+    'com_google_absl',
+    _BAZEL_TOOLCHAINS_DEP_NAME,
+]
+
+
+class BazelEvalState(object):
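+    """Stands in for the 'native' Bazel module while grpc_deps.bzl is exec'd,
+    recording the name and url of each http_archive dependency it declares."""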
+
+    def __init__(self, names_and_urls, overridden_name=None):
+        self.names_and_urls = names_and_urls
+        self.overridden_name = overridden_name
+
+    def http_archive(self, **args):
+        self.archive(**args)
+
+    def new_http_archive(self, **args):
+        self.archive(**args)
+
+    def bind(self, **args):
+        pass
+
+    def existing_rules(self):
+        if self.overridden_name:
+            return [self.overridden_name]
+        return []
+
+    def archive(self, **args):
+        if args['name'] == _BAZEL_TOOLCHAINS_DEP_NAME:
+            self.names_and_urls[args['name']] = 'dont care'
+            return
+        self.names_and_urls[args['name']] = args['url']
+
+
+# Parse git hashes from bazel/grpc_deps.bzl {new_}http_archive rules
+with open(os.path.join('bazel', 'grpc_deps.bzl'), 'r') as f:
+    names_and_urls = {}
+    eval_state = BazelEvalState(names_and_urls)
+    bazel_file = f.read()
+
+# grpc_deps.bzl only defines 'grpc_deps', so append a call to invoke it
+bazel_file += '\ngrpc_deps()\n'
+build_rules = {
+    'native': eval_state,
+}
+exec bazel_file in build_rules
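+# Every dependency in _GRPC_DEP_NAMES must have been declared by grpc_deps.bzl,
+# and nothing else.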
+for name in _GRPC_DEP_NAMES:
+    assert name in names_and_urls.keys()
+assert len(_GRPC_DEP_NAMES) == len(names_and_urls.keys())
+
+# bazeltoolchains is an exception to this sanity check;
+# we don't require that it has a corresponding git submodule.
+names_without_bazeltoolchains = names_and_urls.keys()
+names_without_bazeltoolchains.remove(_BAZEL_TOOLCHAINS_DEP_NAME)
+archive_urls = [names_and_urls[name] for name in names_without_bazeltoolchains]
+workspace_git_hashes = {
+    re.search(git_hash_pattern, url).group()
+    for url in archive_urls
+}
+if len(workspace_git_hashes) == 0:
+    print("(Likely) parse error, did not find any bazel git dependencies.")
+    sys.exit(1)
 
 # Validate the equivalence of the git submodules and Bazel git dependencies. The
 # condition we impose is that there is a git submodule for every dependency in
 # the workspace, but not necessarily conversely. E.g. Bloaty is a dependency
 # not used by any of the targets built by Bazel.
 if len(workspace_git_hashes - git_submodule_hashes) > 0:
-    print("Found discrepancies between git submodules and Bazel WORKSPACE dependencies")
+    print(
+        "Found discrepancies between git submodules and Bazel WORKSPACE dependencies"
+    )
     sys.exit(1)
 
+# Also check that we can override each dependency
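+# (i.e. when existing_rules() already reports a dependency, grpc_deps.bzl must
+# skip redefining it)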
+for name in _GRPC_DEP_NAMES:
+    names_and_urls_with_overridden_name = {}
+    state = BazelEvalState(
+        names_and_urls_with_overridden_name, overridden_name=name)
+    rules = {
+        'native': state,
+    }
+    exec bazel_file in rules
+    assert name not in names_and_urls_with_overridden_name.keys()
+
 sys.exit(0)
diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py
index 986b626..6a704eb 100755
--- a/tools/run_tests/sanity/check_sources_and_headers.py
+++ b/tools/run_tests/sanity/check_sources_and_headers.py
@@ -21,8 +21,10 @@
 import sys
 
 root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
-with open(os.path.join(root, 'tools', 'run_tests', 'generated', 'sources_and_headers.json')) as f:
-  js = json.loads(f.read())
+with open(
+        os.path.join(root, 'tools', 'run_tests', 'generated',
+                     'sources_and_headers.json')) as f:
+    js = json.loads(f.read())
 
 re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"')
 assert re_inc1.match('#include "foo"').group(1) == 'foo'
@@ -31,35 +33,35 @@
 
 
 def get_target(name):
-  for target in js:
-    if target['name'] == name:
-      return target
-  assert False, 'no target %s' % name
+    for target in js:
+        if target['name'] == name:
+            return target
+    assert False, 'no target %s' % name
 
 
 def get_headers_transitive():
-  """Computes set of headers transitively provided by each target"""
-  target_headers_transitive = {}
-  for target in js:
-    target_name = target['name']
-    assert not target_headers_transitive.has_key(target_name)
-    target_headers_transitive[target_name] = set(target['headers'])
-
-  # Make sure each target's transitive headers contain those
-  # of their dependencies. If not, add them and continue doing
-  # so until we get a full pass over all targets without any updates.
-  closure_changed = True
-  while closure_changed:
-    closure_changed = False
+    """Computes set of headers transitively provided by each target"""
+    target_headers_transitive = {}
     for target in js:
-      target_name = target['name']
-      for dep in target['deps']:
-        headers = target_headers_transitive[target_name]
-        old_count = len(headers)
-        headers.update(target_headers_transitive[dep])
-        if old_count != len(headers):
-          closure_changed=True
-  return target_headers_transitive
+        target_name = target['name']
+        assert not target_headers_transitive.has_key(target_name)
+        target_headers_transitive[target_name] = set(target['headers'])
+
+    # Make sure each target's transitive headers contain those
+    # of their dependencies. If not, add them and continue doing
+    # so until we get a full pass over all targets without any updates.
+    closure_changed = True
+    while closure_changed:
+        closure_changed = False
+        for target in js:
+            target_name = target['name']
+            for dep in target['deps']:
+                headers = target_headers_transitive[target_name]
+                old_count = len(headers)
+                headers.update(target_headers_transitive[dep])
+                if old_count != len(headers):
+                    closure_changed = True
+    return target_headers_transitive
 
 
 # precompute transitive closure of headers provided by each target
@@ -67,18 +69,23 @@
 
 
 def target_has_header(target, name):
-  if name in target_headers_transitive[target['name']]:
-    return True
-  if name.startswith('absl/'):
-    return True
-  if name in ['src/core/lib/profiling/stap_probes.h',
-              'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h']:
-    return True
-  return False
+    if name.startswith('absl/'): return True
+    # print target['name'], name
+    if name in target['headers']:
+        return True
+    for dep in target['deps']:
+        if target_has_header(get_target(dep), name):
+            return True
+    if name in [
+            'src/core/lib/profiling/stap_probes.h',
+            'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h'
+    ]:
+        return True
+    return False
 
 
 def produces_object(name):
-  return os.path.splitext(name)[1] in ['.c', '.cc']
+    return os.path.splitext(name)[1] in ['.c', '.cc']
 
 
 c_ish = {}
@@ -86,36 +93,37 @@
 
 errors = 0
 for target in js:
-  if not target['third_party']:
-    for fn in target['src']:
-      with open(os.path.join(root, fn)) as f:
-        src = f.read().splitlines()
-      for line in src:
-        m = re_inc1.match(line)
-        if m:
-          if not target_has_header(target, m.group(1)):
-            print (
-              'target %s (%s) does not name header %s as a dependency' % (
-                target['name'], fn, m.group(1)))
-            errors += 1
-        m = re_inc2.match(line)
-        if m:
-          if not target_has_header(target, 'include/' + m.group(1)):
-            print (
-              'target %s (%s) does not name header %s as a dependency' % (
-                target['name'], fn, m.group(1)))
-            errors += 1
-  if target['type'] in ['lib', 'filegroup']:
-    for fn in target['src']:
-      language = target['language']
-      if produces_object(fn):
-        obj_base = os.path.splitext(os.path.basename(fn))[0]
-        if obj_base in obj_producer_to_source[language]:
-          if obj_producer_to_source[language][obj_base] != fn:
-            print (
-              'target %s (%s) produces an aliased object file with %s' % (
-                target['name'], fn, obj_producer_to_source[language][obj_base]))
-        else:
-          obj_producer_to_source[language][obj_base] = fn
+    if not target['third_party']:
+        for fn in target['src']:
+            with open(os.path.join(root, fn)) as f:
+                src = f.read().splitlines()
+            for line in src:
+                m = re_inc1.match(line)
+                if m:
+                    if not target_has_header(target, m.group(1)):
+                        print(
+                            'target %s (%s) does not name header %s as a dependency'
+                            % (target['name'], fn, m.group(1)))
+                        errors += 1
+                m = re_inc2.match(line)
+                if m:
+                    if not target_has_header(target, 'include/' + m.group(1)):
+                        print(
+                            'target %s (%s) does not name header %s as a dependency'
+                            % (target['name'], fn, m.group(1)))
+                        errors += 1
+    if target['type'] in ['lib', 'filegroup']:
+        for fn in target['src']:
+            language = target['language']
+            if produces_object(fn):
+                obj_base = os.path.splitext(os.path.basename(fn))[0]
+                if obj_base in obj_producer_to_source[language]:
+                    if obj_producer_to_source[language][obj_base] != fn:
+                        print(
+                            'target %s (%s) produces an aliased object file with %s'
+                            % (target['name'], fn,
+                               obj_producer_to_source[language][obj_base]))
+                else:
+                    obj_producer_to_source[language][obj_base] = fn
 
 assert errors == 0
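
get_headers_transitive() builds each target's transitive header set by folding dependency headers in until a full pass makes no change, i.e. a fixed-point iteration. A minimal standalone sketch of that loop over a hypothetical target list (the real data comes from tools/run_tests/generated/sources_and_headers.json):

# Sketch: fixed-point computation of transitive headers (made-up targets).
targets = [
    {'name': 'gpr',    'headers': ['gpr.h'],    'deps': []},
    {'name': 'grpc',   'headers': ['grpc.h'],   'deps': ['gpr']},
    {'name': 'grpc++', 'headers': ['grpcpp.h'], 'deps': ['grpc']},
]

transitive = {t['name']: set(t['headers']) for t in targets}

changed = True
while changed:  # iterate until no header set grows
    changed = False
    for t in targets:
        for dep in t['deps']:
            before = len(transitive[t['name']])
            transitive[t['name']].update(transitive[dep])
            if len(transitive[t['name']]) != before:
                changed = True

assert transitive['grpc++'] == {'grpcpp.h', 'grpc.h', 'gpr.h'}
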
diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py
index ff4ecba..c2a6399 100755
--- a/tools/run_tests/sanity/check_test_filtering.py
+++ b/tools/run_tests/sanity/check_test_filtering.py
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import os
 import sys
 import unittest
@@ -25,108 +24,138 @@
 from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
 import python_utils.filter_pull_request_tests as filter_pull_request_tests
 
-_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby']
+_LIST_OF_LANGUAGE_LABELS = [
+    'c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby'
+]
 _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
 
+
 class TestFilteringTest(unittest.TestCase):
 
-  def generate_all_tests(self):
-    all_jobs = _create_test_jobs() + _create_portability_test_jobs()
-    self.assertIsNotNone(all_jobs)
-    return all_jobs
+    def generate_all_tests(self):
+        all_jobs = _create_test_jobs() + _create_portability_test_jobs()
+        self.assertIsNotNone(all_jobs)
+        return all_jobs
 
-  def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
-    """
+    def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
+        """
     Default args should filter no tests because changed_files is empty and
     default labels should be able to match all jobs
     :param changed_files: mock list of changed_files from pull request
     :param labels: list of job labels that should be skipped
     """
-    all_jobs = self.generate_all_tests()
-    # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
-    def _get_changed_files(foo):
-      return changed_files
-    filter_pull_request_tests._get_changed_files = _get_changed_files
-    print()
-    filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
+        all_jobs = self.generate_all_tests()
 
-    # Make sure sanity tests aren't being filtered out
-    sanity_tests_in_all_jobs = 0
-    sanity_tests_in_filtered_jobs = 0
-    for job in all_jobs:
-      if "sanity" in job.labels:
-        sanity_tests_in_all_jobs += 1
-    all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
-    for job in filtered_jobs:
-      if "sanity" in job.labels:
-        sanity_tests_in_filtered_jobs += 1
-    filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels]
-    self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs)
+        # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
+        def _get_changed_files(foo):
+            return changed_files
 
-    for label in labels:
-      for job in filtered_jobs:
-        self.assertNotIn(label, job.labels)
+        filter_pull_request_tests._get_changed_files = _get_changed_files
+        print()
+        filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
 
-    jobs_matching_labels = 0
-    for label in labels:
-      for job in all_jobs:
-        if (label in job.labels):
-          jobs_matching_labels += 1
-    self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
+        # Make sure sanity tests aren't being filtered out
+        sanity_tests_in_all_jobs = 0
+        sanity_tests_in_filtered_jobs = 0
+        for job in all_jobs:
+            if "sanity" in job.labels:
+                sanity_tests_in_all_jobs += 1
+        all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
+        for job in filtered_jobs:
+            if "sanity" in job.labels:
+                sanity_tests_in_filtered_jobs += 1
+        filtered_jobs = [
+            job for job in filtered_jobs if "sanity" not in job.labels
+        ]
+        self.assertEquals(sanity_tests_in_all_jobs,
+                          sanity_tests_in_filtered_jobs)
 
-  def test_individual_language_filters(self):
-    # Changing unlisted file should trigger all languages
-    self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
-    # Changing core should trigger all tests
-    self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
-    # Testing individual languages
-    self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                filter_pull_request_tests._CORE_TEST_SUITE.labels +
-                                                filter_pull_request_tests._CPP_TEST_SUITE.labels])
-    self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                              filter_pull_request_tests._CPP_TEST_SUITE.labels])
-    self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                 filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
-    self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                      filter_pull_request_tests._OBJC_TEST_SUITE.labels])
-    self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                              filter_pull_request_tests._PHP_TEST_SUITE.labels])
-    self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                 filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
-    self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                               filter_pull_request_tests._RUBY_TEST_SUITE.labels])
+        for label in labels:
+            for job in filtered_jobs:
+                self.assertNotIn(label, job.labels)
 
-  def test_combined_language_filters(self):
-    self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._CORE_TEST_SUITE.labels])
-    self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
-    self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._RUBY_TEST_SUITE.labels])
+        jobs_matching_labels = 0
+        for label in labels:
+            for job in all_jobs:
+                if (label in job.labels):
+                    jobs_matching_labels += 1
+        self.assertEquals(
+            len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
 
-  def test_platform_filter(self):
-    self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
-                                                 filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
+    def test_individual_language_filters(self):
+        # Changing unlisted file should trigger all languages
+        self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
+        # Changing core should trigger all tests
+        self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
+        # Testing individual languages
+        self.test_filtering(['test/core/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CORE_TEST_SUITE.labels +
+            filter_pull_request_tests._CPP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/cpp/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/csharp/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/objective-c/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/php/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/python/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/ruby/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+        ])
 
-  def test_whitelist(self):
-    whitelist = filter_pull_request_tests._WHITELIST_DICT
-    files_that_should_trigger_all_tests = ['src/core/foo.bar',
-                                           'some_file_not_on_the_white_list',
-                                           'BUILD',
-                                           'etc/roots.pem',
-                                           'Makefile',
-                                           'tools/foo']
-    for key in whitelist.keys():
-      for file_name in files_that_should_trigger_all_tests:
-        self.assertFalse(re.match(key, file_name))
+    def test_combined_language_filters(self):
+        self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+            label not in filter_pull_request_tests._CORE_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+            label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+        ])
+        self.test_filtering([
+            'src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar",
+            "src/ruby/foo.bar"
+        ], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+        ])
+
+    def test_platform_filter(self):
+        self.test_filtering(['vsprojects/foo.bar'], [
+            label for label in _LIST_OF_PLATFORM_LABELS
+            if label not in filter_pull_request_tests._WINDOWS_TEST_SUITE.labels
+        ])
+
+    def test_whitelist(self):
+        whitelist = filter_pull_request_tests._WHITELIST_DICT
+        files_that_should_trigger_all_tests = [
+            'src/core/foo.bar', 'some_file_not_on_the_white_list', 'BUILD',
+            'etc/roots.pem', 'Makefile', 'tools/foo'
+        ]
+        for key in whitelist.keys():
+            for file_name in files_that_should_trigger_all_tests:
+                self.assertFalse(re.match(key, file_name))
+
 
 if __name__ == '__main__':
-  unittest.main(verbosity=2)
+    unittest.main(verbosity=2)
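
TestFilteringTest works by monkey-patching filter_pull_request_tests._get_changed_files so that filter_tests() sees a canned list of changed files. A minimal standalone sketch of that pattern; the stub module and its filter_tests() below are hypothetical stand-ins, not the real python_utils.filter_pull_request_tests.

# Sketch: replace a module-level helper before calling the code under test.
import types
import unittest

fake_filter = types.ModuleType('fake_filter')
fake_filter._get_changed_files = lambda base_branch: []
fake_filter.filter_tests = (
    lambda jobs, base: jobs if fake_filter._get_changed_files(base) else [])


class FilteringSketchTest(unittest.TestCase):

    def test_patched_changed_files(self):
        # Same trick as TestFilteringTest: swap the helper, then call through.
        fake_filter._get_changed_files = lambda base: ['src/core/foo.c']
        self.assertEqual(
            fake_filter.filter_tests(['job-a', 'job-b'], 'test'),
            ['job-a', 'job-b'])


if __name__ == '__main__':
    unittest.main(verbosity=2)
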
diff --git a/tools/run_tests/sanity/check_tracer_sanity.py b/tools/run_tests/sanity/check_tracer_sanity.py
index 997ec79..c4c7653 100755
--- a/tools/run_tests/sanity/check_tracer_sanity.py
+++ b/tools/run_tests/sanity/check_tracer_sanity.py
@@ -26,21 +26,22 @@
 tracers = []
 pattern = re.compile("GRPC_TRACER_INITIALIZER\((true|false), \"(.*)\"\)")
 for root, dirs, files in os.walk('src/core'):
-  for filename in files:
-    path = os.path.join(root, filename)
-    if os.path.splitext(path)[1] != '.c': continue
-    with open(path) as f:
-      text = f.read()
-    for o in pattern.findall(text):
-      tracers.append(o[1])
+    for filename in files:
+        path = os.path.join(root, filename)
+        if os.path.splitext(path)[1] != '.c': continue
+        with open(path) as f:
+            text = f.read()
+        for o in pattern.findall(text):
+            tracers.append(o[1])
 
 with open('doc/environment_variables.md') as f:
- text = f.read()
+    text = f.read()
 
 for t in tracers:
     if t not in text:
-        print("ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md" % t)
+        print(
+            "ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md"
+            % t)
         errors += 1
 
-
 assert errors == 0
diff --git a/tools/run_tests/sanity/check_version.py b/tools/run_tests/sanity/check_version.py
index b9b6bab..6154b26 100755
--- a/tools/run_tests/sanity/check_version.py
+++ b/tools/run_tests/sanity/check_version.py
@@ -31,56 +31,56 @@
 from expand_version import Version
 
 try:
-  branch_name = subprocess.check_output(
-    'git rev-parse --abbrev-ref HEAD',
-    shell=True)
+    branch_name = subprocess.check_output(
+        'git rev-parse --abbrev-ref HEAD', shell=True)
 except:
-  print('WARNING: not a git repository')
-  branch_name = None
+    print('WARNING: not a git repository')
+    branch_name = None
 
 if branch_name is not None:
-  m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
-  if m:
-    print('RELEASE branch')
-    # version number should align with the branched version
-    check_version = lambda version: (
-      version.major == int(m.group(1)) and
-      version.minor == int(m.group(2)))
-    warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (m.group(1), m.group(2))
-  elif re.match(r'^debian/.*$', branch_name):
-    # no additional version checks for debian branches
-    check_version = lambda version: True
-  else:
-    # all other branches should have a -dev tag
-    check_version = lambda version: version.tag == 'dev'
-    warning = 'Version key "%s" value "%s" should have a -dev tag'
+    m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
+    if m:
+        print('RELEASE branch')
+        # version number should align with the branched version
+        check_version = lambda version: (
+          version.major == int(m.group(1)) and
+          version.minor == int(m.group(2)))
+        warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (
+            m.group(1), m.group(2))
+    elif re.match(r'^debian/.*$', branch_name):
+        # no additional version checks for debian branches
+        check_version = lambda version: True
+    else:
+        # all other branches should have a -dev tag
+        check_version = lambda version: version.tag == 'dev'
+        warning = 'Version key "%s" value "%s" should have a -dev tag'
 else:
-  check_version = lambda version: True
+    check_version = lambda version: True
 
 with open('build.yaml', 'r') as f:
-  build_yaml = yaml.load(f.read())
+    build_yaml = yaml.load(f.read())
 
 settings = build_yaml['settings']
 
 top_version = Version(settings['version'])
 if not check_version(top_version):
-  errors += 1
-  print(warning % ('version', top_version))
+    errors += 1
+    print(warning % ('version', top_version))
 
 for tag, value in settings.iteritems():
-  if re.match(r'^[a-z]+_version$', tag):
-    value = Version(value)
-    if tag != 'core_version':
-      if value.major != top_version.major:
-        errors += 1
-        print('major version mismatch on %s: %d vs %d' % (tag, value.major,
-                                                          top_version.major))
-      if value.minor != top_version.minor:
-        errors += 1
-        print('minor version mismatch on %s: %d vs %d' % (tag, value.minor,
-                                                          top_version.minor))
-    if not check_version(value):
-      errors += 1
-      print(warning % (tag, value))
+    if re.match(r'^[a-z]+_version$', tag):
+        value = Version(value)
+        if tag != 'core_version':
+            if value.major != top_version.major:
+                errors += 1
+                print('major version mismatch on %s: %d vs %d' %
+                      (tag, value.major, top_version.major))
+            if value.minor != top_version.minor:
+                errors += 1
+                print('minor version mismatch on %s: %d vs %d' %
+                      (tag, value.minor, top_version.minor))
+        if not check_version(value):
+            errors += 1
+            print(warning % (tag, value))
 
 sys.exit(errors)
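
check_version.py picks its version predicate from the branch name: release-X_Y branches must carry matching major/minor numbers, debian/* branches are exempt, and every other branch must have a -dev tag. A minimal standalone sketch of that selection; `Version` here is a hypothetical stand-in for the class imported from expand_version.

# Sketch: branch name determines which version predicate applies.
import re
from collections import namedtuple

Version = namedtuple('Version', ['major', 'minor', 'patch', 'tag'])


def version_check_for_branch(branch_name):
    m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
    if m:
        # On release-X_Y the version must be X.Y.*
        return lambda v: (v.major == int(m.group(1)) and
                          v.minor == int(m.group(2)))
    if re.match(r'^debian/.*$', branch_name):
        return lambda v: True
    # Every other branch must carry a -dev tag.
    return lambda v: v.tag == 'dev'


assert version_check_for_branch('release-1_9')(Version(1, 9, 0, ''))
assert not version_check_for_branch('release-1_9')(Version(1, 8, 0, ''))
assert version_check_for_branch('master')(Version(1, 9, 0, 'dev'))
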
diff --git a/tools/run_tests/sanity/core_banned_functions.py b/tools/run_tests/sanity/core_banned_functions.py
index 1f13905..9ee2896 100755
--- a/tools/run_tests/sanity/core_banned_functions.py
+++ b/tools/run_tests/sanity/core_banned_functions.py
@@ -36,26 +36,28 @@
     'grpc_wsa_error(': ['src/core/lib/iomgr/error.c'],
     'grpc_log_if_error(': ['src/core/lib/iomgr/error.c'],
     'grpc_slice_malloc(': ['src/core/lib/slice/slice.c'],
-    'grpc_closure_create(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_init(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_sched(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_run(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_list_sched(' : ['src/core/lib/iomgr/closure.c'],
-    'gpr_getenv_silent(' : ['src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c', 
-                            'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'],
+    'grpc_closure_create(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_init(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_sched(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_run(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_list_sched(': ['src/core/lib/iomgr/closure.c'],
+    'gpr_getenv_silent(': [
+        'src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c',
+        'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'
+    ],
 }
 
 errors = 0
 for root, dirs, files in os.walk('src/core'):
-  for filename in files:
-    path = os.path.join(root, filename)
-    if os.path.splitext(path)[1] != '.c': continue
-    with open(path) as f:
-      text = f.read()
-    for banned, exceptions in BANNED_EXCEPT.items():
-      if path in exceptions: continue
-      if banned in text:
-        print('Illegal use of "%s" in %s' % (banned, path))
-        errors += 1
+    for filename in files:
+        path = os.path.join(root, filename)
+        if os.path.splitext(path)[1] != '.c': continue
+        with open(path) as f:
+            text = f.read()
+        for banned, exceptions in BANNED_EXCEPT.items():
+            if path in exceptions: continue
+            if banned in text:
+                print('Illegal use of "%s" in %s' % (banned, path))
+                errors += 1
 
 assert errors == 0
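
core_banned_functions.py flags any banned call found outside the file that is allowed to define it. A minimal standalone sketch of that rule, using inlined made-up file contents instead of walking src/core:

# Sketch: banned-symbol scan with per-file exceptions (made-up file contents).
BANNED_EXCEPT = {
    'grpc_error_create(': ['src/core/lib/iomgr/error.c'],
}

files = {
    'src/core/lib/iomgr/error.c': 'e = grpc_error_create(...);',   # allowed
    'src/core/lib/surface/call.c': 'e = grpc_error_create(...);',  # flagged
}

errors = 0
for path, text in files.items():
    for banned, exceptions in BANNED_EXCEPT.items():
        if path in exceptions:
            continue
        if banned in text:
            print('Illegal use of "%s" in %s' % (banned, path))
            errors += 1

assert errors == 1
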
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
index 3628750..0eeceb4 100755
--- a/tools/run_tests/start_port_server.py
+++ b/tools/run_tests/start_port_server.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """
 Wrapper around port server starting code.
 
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index a065bb8..794db6e 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Runs selected gRPC test/build tasks."""
 
 from __future__ import print_function
@@ -32,52 +31,54 @@
 _TARGETS += distribtest_targets.targets()
 _TARGETS += package_targets.targets()
 
+
 def _create_build_map():
-  """Maps task names and labels to list of tasks to be built."""
-  target_build_map = dict([(target.name, [target])
-                           for target in _TARGETS])
-  if len(_TARGETS) > len(target_build_map.keys()):
-    raise Exception('Target names need to be unique')
+    """Maps task names and labels to list of tasks to be built."""
+    target_build_map = dict([(target.name, [target]) for target in _TARGETS])
+    if len(_TARGETS) > len(target_build_map.keys()):
+        raise Exception('Target names need to be unique')
 
-  label_build_map = {}
-  label_build_map['all'] = [t for t in _TARGETS]  # to build all targets
-  for target in _TARGETS:
-    for label in target.labels:
-      if label in label_build_map:
-        label_build_map[label].append(target)
-      else:
-        label_build_map[label] = [target]
+    label_build_map = {}
+    label_build_map['all'] = [t for t in _TARGETS]  # to build all targets
+    for target in _TARGETS:
+        for label in target.labels:
+            if label in label_build_map:
+                label_build_map[label].append(target)
+            else:
+                label_build_map[label] = [target]
 
-  if set(target_build_map.keys()).intersection(label_build_map.keys()):
-    raise Exception('Target names need to be distinct from label names')
-  return dict( target_build_map.items() + label_build_map.items())
+    if set(target_build_map.keys()).intersection(label_build_map.keys()):
+        raise Exception('Target names need to be distinct from label names')
+    return dict(target_build_map.items() + label_build_map.items())
 
 
 _BUILD_MAP = _create_build_map()
 
 argp = argparse.ArgumentParser(description='Runs build/test targets.')
-argp.add_argument('-b', '--build',
-                  choices=sorted(_BUILD_MAP.keys()),
-                  nargs='+',
-                  default=['all'],
-                  help='Target name or target label to build.')
-argp.add_argument('-f', '--filter',
-                  choices=sorted(_BUILD_MAP.keys()),
-                  nargs='+',
-                  default=[],
-                  help='Filter targets to build with AND semantics.')
+argp.add_argument(
+    '-b',
+    '--build',
+    choices=sorted(_BUILD_MAP.keys()),
+    nargs='+',
+    default=['all'],
+    help='Target name or target label to build.')
+argp.add_argument(
+    '-f',
+    '--filter',
+    choices=sorted(_BUILD_MAP.keys()),
+    nargs='+',
+    default=[],
+    help='Filter targets to build with AND semantics.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
 
 args = argp.parse_args()
 
 # Figure out which targets to build
 targets = []
 for label in args.build:
-  targets += _BUILD_MAP[label]
+    targets += _BUILD_MAP[label]
 
 # Among targets selected by -b, filter out those that don't match the filter
 targets = [t for t in targets if all(f in t.labels for f in args.filter)]
@@ -86,30 +87,29 @@
 # Execute pre-build phase
 prebuild_jobs = []
 for target in targets:
-  prebuild_jobs += target.pre_build_jobspecs()
+    prebuild_jobs += target.pre_build_jobspecs()
 if prebuild_jobs:
-  num_failures, _ = jobset.run(
-    prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
-  if num_failures != 0:
-    jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
-    sys.exit(1)
+    num_failures, _ = jobset.run(
+        prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
+    if num_failures != 0:
+        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
+        sys.exit(1)
 
 build_jobs = []
 for target in targets:
-  build_jobs.append(target.build_jobspec())
+    build_jobs.append(target.build_jobspec())
 if not build_jobs:
-  print('Nothing to build.')
-  sys.exit(1)
+    print('Nothing to build.')
+    sys.exit(1)
 
 jobset.message('START', 'Building targets.', do_newline=True)
 num_failures, resultset = jobset.run(
     build_jobs, newline_on_success=True, maxjobs=args.jobs)
-report_utils.render_junit_xml_report(resultset, 'report_taskrunner_sponge_log.xml',
-                                     suite_name='tasks')
+report_utils.render_junit_xml_report(
+    resultset, 'report_taskrunner_sponge_log.xml', suite_name='tasks')
 if num_failures == 0:
-  jobset.message('SUCCESS', 'All targets built successfully.',
-                 do_newline=True)
+    jobset.message(
+        'SUCCESS', 'All targets built successfully.', do_newline=True)
 else:
-  jobset.message('FAILED', 'Failed to build targets.',
-                 do_newline=True)
-  sys.exit(1)
+    jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
+    sys.exit(1)
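
_create_build_map() maps every target name to itself and every label to all targets carrying it, insisting that names are unique and never collide with label names; note that the final `dict(target_build_map.items() + label_build_map.items())` merge only works on Python 2, where items() returns lists. A minimal standalone sketch of the same idea with a hypothetical Target tuple, merged via update() so it also runs on Python 3:

# Sketch: build a combined name/label -> targets map (made-up targets).
from collections import namedtuple

Target = namedtuple('Target', ['name', 'labels'])
targets = [
    Target('csharp_package', ['package', 'csharp']),
    Target('php_package', ['package', 'php']),
]

target_map = {t.name: [t] for t in targets}
if len(targets) > len(target_map):
    raise Exception('Target names need to be unique')

label_map = {'all': list(targets)}
for t in targets:
    for label in t.labels:
        label_map.setdefault(label, []).append(t)

if set(target_map).intersection(label_map):
    raise Exception('Target names need to be distinct from label names')

build_map = dict(target_map)
build_map.update(label_map)  # portable replacement for items() + items()
assert [t.name for t in build_map['package']] == ['csharp_package',
                                                  'php_package']
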